2024-12-05 13:56:23,261 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-12-05 13:56:23,272 main DEBUG Took 0.009354 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-05 13:56:23,273 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-05 13:56:23,273 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-05 13:56:23,274 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-05 13:56:23,275 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 13:56:23,289 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-05 13:56:23,300 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,302 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 13:56:23,303 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,303 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 13:56:23,303 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,304 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 13:56:23,305 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,305 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 13:56:23,305 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,306 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 13:56:23,306 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,307 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 13:56:23,307 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,307 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-05 13:56:23,308 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,308 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 13:56:23,309 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,309 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 13:56:23,309 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,309 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 13:56:23,310 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,310 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 13:56:23,310 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,311 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-05 13:56:23,311 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,311 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-05 13:56:23,313 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-05 13:56:23,314 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-05 13:56:23,316 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-05 13:56:23,316 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
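
The createLoggers(...) entry above is Log4j assembling the per-package levels declared in the test's log4j2.properties (for example org.apache.zookeeper at ERROR, org.apache.hadoop.hbase at DEBUG, and a root level of INFO routed to the Console appender). For reference, a minimal programmatic equivalent using the standard Log4j 2 Configurator API might look like the sketch below; the class name TestLogLevels is illustrative, and the test itself sets these levels through the properties file, not through code.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public final class TestLogLevels {
      // Programmatic equivalent of the levels assembled by createLoggers(...) above;
      // the actual test gets them from log4j2.properties, not from code like this.
      public static void apply() {
        Configurator.setRootLevel(Level.INFO);                        // root: INFO,Console
        Configurator.setLevel("org.apache.hadoop", Level.WARN);
        Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
        Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
        Configurator.setLevel("org.apache.directory", Level.WARN);
      }
    }
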
2024-12-05 13:56:23,317 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-05 13:56:23,317 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-05 13:56:23,325 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-05 13:56:23,328 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-05 13:56:23,330 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-05 13:56:23,330 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-05 13:56:23,330 main DEBUG createAppenders(={Console}) 2024-12-05 13:56:23,331 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a initialized 2024-12-05 13:56:23,331 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-12-05 13:56:23,332 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a OK. 2024-12-05 13:56:23,332 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-05 13:56:23,332 main DEBUG OutputStream closed 2024-12-05 13:56:23,333 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-05 13:56:23,333 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-05 13:56:23,333 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@2c35e847 OK 2024-12-05 13:56:23,399 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-05 13:56:23,401 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-05 13:56:23,402 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-05 13:56:23,403 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-05 13:56:23,404 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-05 13:56:23,404 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-05 13:56:23,404 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-05 13:56:23,405 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-05 13:56:23,405 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-05 13:56:23,405 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-05 13:56:23,405 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-05 13:56:23,406 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-05 13:56:23,406 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-05 13:56:23,406 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-05 13:56:23,407 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-05 13:56:23,407 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-05 13:56:23,407 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-05 13:56:23,408 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-05 13:56:23,410 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-05 13:56:23,410 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@42b02722) with optional ClassLoader: null 2024-12-05 13:56:23,411 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-05 13:56:23,411 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@42b02722] started OK. 2024-12-05T13:56:23,423 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.master.TestMasterFailoverBalancerPersistence timeout: 13 mins 2024-12-05 13:56:23,426 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-05 13:56:23,427 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
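
The HBaseClassTestRule line above ("Test class org.apache.hadoop.hbase.master.TestMasterFailoverBalancerPersistence timeout: 13 mins") is printed by the class rule that HBase tests declare; the rule derives the per-class timeout from the test's size category. A sketch of that boilerplate follows; the @Category annotations are an assumption inferred from the test's package and the 13-minute timeout, and the test body is omitted.

    import org.apache.hadoop.hbase.HBaseClassTestRule;
    import org.apache.hadoop.hbase.testclassification.LargeTests;
    import org.apache.hadoop.hbase.testclassification.MasterTests;
    import org.junit.ClassRule;
    import org.junit.experimental.categories.Category;

    @Category({ MasterTests.class, LargeTests.class })
    public class TestMasterFailoverBalancerPersistence {

      // This rule is what prints the "timeout: 13 mins" line and enforces the
      // per-class timeout derived from the test's size category.
      @ClassRule
      public static final HBaseClassTestRule CLASS_RULE =
          HBaseClassTestRule.forClass(TestMasterFailoverBalancerPersistence.class);

      // ... testMasterFailoverBalancerPersistence() and other test methods ...
    }
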
2024-12-05T13:56:23,459 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: master.TestMasterFailoverBalancerPersistence#testMasterFailoverBalancerPersistence Thread=11, OpenFileDescriptor=285, MaxFileDescriptor=1048576, SystemLoadAverage=96, ProcessCount=11, AvailableMemoryMB=8990 2024-12-05T13:56:23,674 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb 2024-12-05T13:56:23,675 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=3, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-05T13:56:23,695 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/cluster_8553c555-5bc8-a989-5e4c-cbd56a079f87, deleteOnExit=true 2024-12-05T13:56:23,695 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-05T13:56:23,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/test.cache.data in system properties and HBase conf 2024-12-05T13:56:23,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T13:56:23,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/hadoop.log.dir in system properties and HBase conf 2024-12-05T13:56:23,698 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T13:56:23,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T13:56:23,700 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T13:56:23,779 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-05T13:56:23,872 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-05T13:56:23,876 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T13:56:23,876 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T13:56:23,877 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T13:56:23,877 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T13:56:23,878 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T13:56:23,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T13:56:23,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T13:56:23,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T13:56:23,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T13:56:23,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/nfs.dump.dir in system properties and HBase conf 2024-12-05T13:56:23,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/java.io.tmpdir in system properties and HBase conf 2024-12-05T13:56:23,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T13:56:23,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T13:56:23,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T13:56:24,760 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-05T13:56:24,824 INFO [Time-limited test {}] log.Log(170): Logging initialized @2166ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-05T13:56:24,883 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T13:56:24,933 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T13:56:24,949 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T13:56:24,949 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T13:56:24,951 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-05T13:56:24,961 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T13:56:24,963 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d13f332{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/hadoop.log.dir/,AVAILABLE} 2024-12-05T13:56:24,964 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61fd4728{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T13:56:25,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26c59a36{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/java.io.tmpdir/jetty-localhost-33461-hadoop-hdfs-3_4_1-tests_jar-_-any-5757528104182894865/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T13:56:25,129 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40c01bb1{HTTP/1.1, (http/1.1)}{localhost:33461} 2024-12-05T13:56:25,129 INFO [Time-limited test {}] server.Server(415): Started @2473ms 2024-12-05T13:56:25,581 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T13:56:25,588 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T13:56:25,589 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T13:56:25,589 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T13:56:25,589 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T13:56:25,590 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f4c4215{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/hadoop.log.dir/,AVAILABLE} 2024-12-05T13:56:25,590 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e882389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-05T13:56:25,684 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7182828b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/java.io.tmpdir/jetty-localhost-37933-hadoop-hdfs-3_4_1-tests_jar-_-any-13619390709057860063/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T13:56:25,684 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@11857d05{HTTP/1.1, (http/1.1)}{localhost:37933} 2024-12-05T13:56:25,685 INFO [Time-limited test {}] server.Server(415): Started @3028ms 2024-12-05T13:56:25,728 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-05T13:56:26,476 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/cluster_8553c555-5bc8-a989-5e4c-cbd56a079f87/data/data1/current/BP-1787308914-172.17.0.2-1733406984368/current, will proceed with Du for space computation calculation, 2024-12-05T13:56:26,476 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/cluster_8553c555-5bc8-a989-5e4c-cbd56a079f87/data/data2/current/BP-1787308914-172.17.0.2-1733406984368/current, will proceed with Du for space computation calculation, 2024-12-05T13:56:26,499 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-05T13:56:26,544 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9594cad4b76c6553 with lease ID 0x2ed293c8b1f1cbde: Processing first storage report for DS-3dfab4d5-5bae-4d95-9e3c-7448b7c77506 from datanode DatanodeRegistration(127.0.0.1:43231, datanodeUuid=6fec998d-ad39-446b-92bd-2efdcf2be506, infoPort=36253, infoSecurePort=0, ipcPort=37157, storageInfo=lv=-57;cid=testClusterID;nsid=898768493;c=1733406984368) 2024-12-05T13:56:26,545 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9594cad4b76c6553 with lease ID 0x2ed293c8b1f1cbde: from storage DS-3dfab4d5-5bae-4d95-9e3c-7448b7c77506 node DatanodeRegistration(127.0.0.1:43231, datanodeUuid=6fec998d-ad39-446b-92bd-2efdcf2be506, infoPort=36253, infoSecurePort=0, ipcPort=37157, storageInfo=lv=-57;cid=testClusterID;nsid=898768493;c=1733406984368), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-05T13:56:26,546 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9594cad4b76c6553 with lease ID 0x2ed293c8b1f1cbde: Processing first storage report for DS-8b83e53c-9ea5-4ccb-9a6b-ceb0d8303aea from datanode DatanodeRegistration(127.0.0.1:43231, datanodeUuid=6fec998d-ad39-446b-92bd-2efdcf2be506, infoPort=36253, infoSecurePort=0, ipcPort=37157, storageInfo=lv=-57;cid=testClusterID;nsid=898768493;c=1733406984368) 2024-12-05T13:56:26,546 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9594cad4b76c6553 with lease ID 0x2ed293c8b1f1cbde: from storage DS-8b83e53c-9ea5-4ccb-9a6b-ceb0d8303aea node DatanodeRegistration(127.0.0.1:43231, datanodeUuid=6fec998d-ad39-446b-92bd-2efdcf2be506, infoPort=36253, infoSecurePort=0, ipcPort=37157, storageInfo=lv=-57;cid=testClusterID;nsid=898768493;c=1733406984368), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-05T13:56:26,613 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb 2024-12-05T13:56:26,686 INFO 
[Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/cluster_8553c555-5bc8-a989-5e4c-cbd56a079f87/zookeeper_0, clientPort=62287, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/cluster_8553c555-5bc8-a989-5e4c-cbd56a079f87/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/cluster_8553c555-5bc8-a989-5e4c-cbd56a079f87/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-05T13:56:26,695 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62287 2024-12-05T13:56:26,703 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:26,707 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:26,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741825_1001 (size=7) 2024-12-05T13:56:27,312 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a with version=8 2024-12-05T13:56:27,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/hbase-staging 2024-12-05T13:56:27,381 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-05T13:56:27,597 INFO [Time-limited test {}] client.ConnectionUtils(128): master/da6aa2204f50:0 server-side Connection retries=45 2024-12-05T13:56:27,606 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T13:56:27,606 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T13:56:27,611 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T13:56:27,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T13:56:27,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T13:56:27,727 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting 
hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T13:56:27,778 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-05T13:56:27,787 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-05T13:56:27,790 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T13:56:27,812 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 102342 (auto-detected) 2024-12-05T13:56:27,813 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-05T13:56:27,831 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33783 2024-12-05T13:56:27,850 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33783 connecting to ZooKeeper ensemble=127.0.0.1:62287 2024-12-05T13:56:27,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:337830x0, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T13:56:27,957 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33783-0x101a71134240000 connected 2024-12-05T13:56:28,037 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:28,041 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:28,053 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T13:56:28,056 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a, hbase.cluster.distributed=false 2024-12-05T13:56:28,074 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T13:56:28,078 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33783 2024-12-05T13:56:28,079 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33783 2024-12-05T13:56:28,079 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33783 2024-12-05T13:56:28,080 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33783 2024-12-05T13:56:28,080 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33783 2024-12-05T13:56:28,084 INFO [Time-limited test {}] client.ConnectionUtils(128): master/da6aa2204f50:0 server-side Connection retries=45 
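
The mini-cluster bring-up that begins at the StartMiniClusterOption line above, and continues below with the second and third masters and the region server, is normally driven by a single HBaseTestingUtil call in the test. A hedged sketch of that call, mirroring the option dump in the log (numMasters=3, numRegionServers=1, numDataNodes=1, numZkServers=1), is shown here; the class and variable names are illustrative.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public final class MiniClusterStartupSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors StartMiniClusterOption{numMasters=3, numRegionServers=1,
        // numDataNodes=1, numZkServers=1, ...} from the log: one HDFS datanode,
        // one ZooKeeper server, three masters (one active, two backups), one RS.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(3)
            .numRegionServers(1)
            .numDataNodes(1)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);
        try {
          // ... exercise the cluster ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
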
2024-12-05T13:56:28,084 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T13:56:28,084 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T13:56:28,084 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T13:56:28,084 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T13:56:28,084 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T13:56:28,084 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T13:56:28,085 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T13:56:28,086 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46553 2024-12-05T13:56:28,087 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46553 connecting to ZooKeeper ensemble=127.0.0.1:62287 2024-12-05T13:56:28,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:465530x0, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T13:56:28,098 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46553-0x101a71134240001 connected 2024-12-05T13:56:28,115 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:28,117 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:28,120 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T13:56:28,121 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a, hbase.cluster.distributed=false 2024-12-05T13:56:28,124 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T13:56:28,125 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46553 2024-12-05T13:56:28,125 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46553 
2024-12-05T13:56:28,125 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46553 2024-12-05T13:56:28,126 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46553 2024-12-05T13:56:28,126 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46553 2024-12-05T13:56:28,129 INFO [Time-limited test {}] client.ConnectionUtils(128): master/da6aa2204f50:0 server-side Connection retries=45 2024-12-05T13:56:28,129 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T13:56:28,129 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T13:56:28,129 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T13:56:28,129 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T13:56:28,130 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T13:56:28,130 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-05T13:56:28,131 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T13:56:28,132 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44809 2024-12-05T13:56:28,133 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44809 connecting to ZooKeeper ensemble=127.0.0.1:62287 2024-12-05T13:56:28,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:448090x0, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T13:56:28,143 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44809-0x101a71134240002 connected 2024-12-05T13:56:28,157 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:28,161 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:28,166 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T13:56:28,166 INFO [Time-limited test {}] master.HMaster(525): 
hbase.rootdir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a, hbase.cluster.distributed=false 2024-12-05T13:56:28,168 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T13:56:28,169 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44809 2024-12-05T13:56:28,169 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44809 2024-12-05T13:56:28,170 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44809 2024-12-05T13:56:28,170 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44809 2024-12-05T13:56:28,171 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44809 2024-12-05T13:56:28,253 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/da6aa2204f50:0 server-side Connection retries=45 2024-12-05T13:56:28,255 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T13:56:28,255 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-05T13:56:28,255 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-05T13:56:28,255 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-05T13:56:28,256 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-05T13:56:28,258 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-05T13:56:28,260 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-05T13:56:28,260 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46569 2024-12-05T13:56:28,262 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46569 connecting to ZooKeeper ensemble=127.0.0.1:62287 2024-12-05T13:56:28,263 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:28,265 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:28,276 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:465690x0, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-05T13:56:28,276 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:465690x0, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T13:56:28,276 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46569-0x101a71134240003 connected 2024-12-05T13:56:28,280 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-05T13:56:28,289 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-05T13:56:28,291 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T13:56:28,295 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-05T13:56:28,296 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46569 2024-12-05T13:56:28,296 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46569 2024-12-05T13:56:28,297 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46569 2024-12-05T13:56:28,297 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46569 2024-12-05T13:56:28,298 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46569 2024-12-05T13:56:28,311 DEBUG [M:1;da6aa2204f50:46553 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:1;da6aa2204f50:46553 2024-12-05T13:56:28,312 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/da6aa2204f50,46553,1733406988083 2024-12-05T13:56:28,316 DEBUG [M:2;da6aa2204f50:44809 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:2;da6aa2204f50:44809 2024-12-05T13:56:28,316 DEBUG [M:0;da6aa2204f50:33783 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;da6aa2204f50:33783 2024-12-05T13:56:28,317 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/da6aa2204f50,44809,1733406988128 2024-12-05T13:56:28,317 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/da6aa2204f50,33783,1733406987455 2024-12-05T13:56:28,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:28,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:28,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:28,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:28,335 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/da6aa2204f50,46553,1733406988083 2024-12-05T13:56:28,335 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/da6aa2204f50,33783,1733406987455 2024-12-05T13:56:28,336 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/da6aa2204f50,44809,1733406988128 2024-12-05T13:56:28,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:28,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:28,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:28,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:28,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:28,360 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T13:56:28,360 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T13:56:28,360 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T13:56:28,362 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/da6aa2204f50,33783,1733406987455 from backup master directory 2024-12-05T13:56:28,362 INFO 
[master/da6aa2204f50:0:becomeActiveMaster {}] master.ActiveMasterManager(296): Another master is the active master, da6aa2204f50,33783,1733406987455; waiting to become the next active master 2024-12-05T13:56:28,362 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ActiveMasterManager(296): Another master is the active master, da6aa2204f50,33783,1733406987455; waiting to become the next active master 2024-12-05T13:56:28,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/da6aa2204f50,33783,1733406987455 2024-12-05T13:56:28,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:28,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:28,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:28,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:28,373 WARN [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
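
The two ActiveMasterManager(296) entries above show the election outcome: da6aa2204f50,33783,1733406987455 holds the active-master znode while the other two masters wait as backups under /hbase/backup-masters. A failover test typically inspects (and later disturbs) that state through the mini-cluster handle, roughly as in the sketch below; the class, method, and variable names are illustrative and assume the test utilities of the current HBase master branch.

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
    import org.apache.hadoop.hbase.master.HMaster;
    import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;

    public final class ActiveMasterInspectionSketch {
      static void inspectMasters(HBaseTestingUtil util) {
        SingleProcessHBaseCluster cluster = util.getHBaseCluster();
        // The master that logs "Registered as active master" below.
        HMaster active = cluster.getMaster();
        // All three master threads from this run: one active plus two backups
        // parked under /hbase/backup-masters.
        List<MasterThread> masters = cluster.getMasterThreads();
        System.out.println("active=" + active.getServerName() + ", masters=" + masters.size());
        // Stopping or aborting the active master would let one of the waiting
        // backups win the next election, which is what a failover test exercises.
      }
    }
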
2024-12-05T13:56:28,373 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=da6aa2204f50,33783,1733406987455 2024-12-05T13:56:28,376 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-05T13:56:28,378 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-05T13:56:28,433 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/hbase.id] with ID: df4662d0-9548-4c10-a6a7-e701c5555560 2024-12-05T13:56:28,434 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/.tmp/hbase.id 2024-12-05T13:56:28,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741826_1002 (size=42) 2024-12-05T13:56:28,849 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/.tmp/hbase.id]:[hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/hbase.id] 2024-12-05T13:56:28,897 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:28,902 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T13:56:28,920 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 
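
The FSUtils entries above record the cluster ID (df4662d0-9548-4c10-a6a7-e701c5555560) being written to a temporary .tmp/hbase.id and then moved to its final hbase.id location, so other processes never read a half-written file. A simplified sketch of that write-then-rename pattern against the Hadoop FileSystem API follows; the helper name is illustrative, and the real file holds HBase's serialized ClusterId rather than a raw string.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ClusterIdFileSketch {
      // Write-then-rename, as in the FSUtils lines above: the ID is first written to
      // <rootdir>/.tmp/hbase.id and only then moved to <rootdir>/hbase.id, so readers
      // never observe a partially written file. (The real FSUtils writes a serialized
      // ClusterId rather than a plain string.)
      static void publishClusterId(Configuration conf, Path rootDir, String clusterId)
          throws IOException {
        FileSystem fs = rootDir.getFileSystem(conf);
        Path tmp = new Path(new Path(rootDir, ".tmp"), "hbase.id");
        Path target = new Path(rootDir, "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, target)) {
          throw new IOException("Could not move " + tmp + " to " + target);
        }
      }
    }
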
2024-12-05T13:56:28,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:28,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:28,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:28,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:28,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741827_1003 (size=196) 2024-12-05T13:56:29,402 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T13:56:29,403 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T13:56:29,416 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T13:56:29,419 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:56:29,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741828_1004 (size=1189) 2024-12-05T13:56:29,869 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store 2024-12-05T13:56:29,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741829_1005 (size=34) 2024-12-05T13:56:30,298 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-05T13:56:30,301 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:56:30,302 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T13:56:30,303 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:56:30,303 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T13:56:30,304 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T13:56:30,305 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:56:30,305 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:56:30,306 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733406990302Disabling compacts and flushes for region at 1733406990302Disabling writes for close at 1733406990304 (+2 ms)Writing region close event to WAL at 1733406990305 (+1 ms)Closed at 1733406990305 2024-12-05T13:56:30,309 WARN [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/.initializing 2024-12-05T13:56:30,309 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455 2024-12-05T13:56:30,317 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:56:30,331 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=da6aa2204f50%2C33783%2C1733406987455, suffix=, logDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455, archiveDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/oldWALs, maxLogs=10 2024-12-05T13:56:30,351 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455/da6aa2204f50%2C33783%2C1733406987455.1733406990336, exclude list is [], retry=0 2024-12-05T13:56:30,366 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43231,DS-3dfab4d5-5bae-4d95-9e3c-7448b7c77506,DISK] 2024-12-05T13:56:30,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-05T13:56:30,398 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455/da6aa2204f50%2C33783%2C1733406987455.1733406990336 2024-12-05T13:56:30,399 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36253:36253)] 2024-12-05T13:56:30,400 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:56:30,400 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:56:30,402 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:30,403 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:30,436 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:30,458 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T13:56:30,461 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:30,463 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:56:30,464 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:30,467 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T13:56:30,467 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:30,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:56:30,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:30,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T13:56:30,471 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:30,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:56:30,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:30,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T13:56:30,476 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:30,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:56:30,477 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:30,480 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:30,482 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:30,487 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:30,487 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:30,491 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-05T13:56:30,495 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:30,499 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:56:30,500 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64416215, jitterRate=-0.0401235967874527}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T13:56:30,506 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733406990414Initializing all the Stores at 1733406990416 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406990417 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406990417Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406990417Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406990417Cleaning up temporary data from old regions at 1733406990487 (+70 ms)Region opened successfully at 1733406990505 (+18 ms) 2024-12-05T13:56:30,507 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T13:56:30,538 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc926e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=da6aa2204f50/172.17.0.2:0 2024-12-05T13:56:30,565 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-05T13:56:30,574 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T13:56:30,574 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T13:56:30,577 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T13:56:30,578 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-05T13:56:30,584 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-05T13:56:30,584 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T13:56:30,609 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T13:56:30,616 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T13:56:30,659 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T13:56:30,663 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T13:56:30,665 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T13:56:30,675 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T13:56:30,679 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T13:56:30,683 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T13:56:30,692 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T13:56:30,694 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T13:56:30,700 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T13:56:30,722 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T13:56:30,730 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T13:56:30,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T13:56:30,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T13:56:30,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T13:56:30,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T13:56:30,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:30,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:30,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:30,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:30,747 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=da6aa2204f50,33783,1733406987455, sessionid=0x101a71134240000, setting cluster-up flag (Was=false) 2024-12-05T13:56:30,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:30,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:30,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:30,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:30,801 DEBUG 
[master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T13:56:30,805 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=da6aa2204f50,33783,1733406987455 2024-12-05T13:56:30,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:30,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:30,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:30,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:30,851 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T13:56:30,854 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=da6aa2204f50,33783,1733406987455 2024-12-05T13:56:30,866 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T13:56:30,905 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(746): ClusterId : df4662d0-9548-4c10-a6a7-e701c5555560 2024-12-05T13:56:30,907 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T13:56:30,919 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T13:56:30,919 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T13:56:30,927 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T13:56:30,927 DEBUG [RS:0;da6aa2204f50:46569 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c7fbe3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=da6aa2204f50/172.17.0.2:0 2024-12-05T13:56:30,936 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T13:56:30,940 DEBUG [RS:0;da6aa2204f50:46569 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: 
Shutdownhook:RS:0;da6aa2204f50:46569 2024-12-05T13:56:30,943 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T13:56:30,943 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T13:56:30,943 DEBUG [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T13:56:30,945 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T13:56:30,946 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(2659): reportForDuty to master=da6aa2204f50,33783,1733406987455 with port=46569, startcode=1733406988224 2024-12-05T13:56:30,952 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-05T13:56:30,955 DEBUG [RS:0;da6aa2204f50:46569 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T13:56:30,957 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: da6aa2204f50,33783,1733406987455 Number of backup masters: 2 da6aa2204f50,44809,1733406988128 da6aa2204f50,46553,1733406988083 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T13:56:30,963 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:56:30,964 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:56:30,964 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:56:30,964 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:56:30,964 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/da6aa2204f50:0, corePoolSize=10, maxPoolSize=10 2024-12-05T13:56:30,964 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:30,964 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/da6aa2204f50:0, corePoolSize=2, 
maxPoolSize=2 2024-12-05T13:56:30,964 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:30,966 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733407020966 2024-12-05T13:56:30,968 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T13:56:30,969 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T13:56:30,969 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T13:56:30,970 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T13:56:30,973 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T13:56:30,973 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T13:56:30,973 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T13:56:30,974 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T13:56:30,974 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-05T13:56:30,977 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:30,977 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T13:56:30,980 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T13:56:30,981 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T13:56:30,982 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T13:56:30,984 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T13:56:30,985 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T13:56:30,987 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.large.0-1733406990986,5,FailOnTimeoutGroup] 2024-12-05T13:56:30,987 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.small.0-1733406990987,5,FailOnTimeoutGroup] 2024-12-05T13:56:30,988 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:30,988 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T13:56:30,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741831_1007 (size=1321) 2024-12-05T13:56:30,989 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:30,989 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:30,993 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52311, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T13:56:30,998 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33783 {}] master.ServerManager(363): Checking decommissioned status of RegionServer da6aa2204f50,46569,1733406988224 2024-12-05T13:56:31,001 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33783 {}] master.ServerManager(517): Registering regionserver=da6aa2204f50,46569,1733406988224 2024-12-05T13:56:31,013 DEBUG [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a 2024-12-05T13:56:31,013 DEBUG [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44371 2024-12-05T13:56:31,013 DEBUG [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T13:56:31,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T13:56:31,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T13:56:31,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T13:56:31,048 DEBUG [RS:0;da6aa2204f50:46569 {}] zookeeper.ZKUtil(111): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/da6aa2204f50,46569,1733406988224 2024-12-05T13:56:31,048 WARN [RS:0;da6aa2204f50:46569 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-05T13:56:31,048 INFO [RS:0;da6aa2204f50:46569 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:56:31,049 DEBUG [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/WALs/da6aa2204f50,46569,1733406988224 2024-12-05T13:56:31,052 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [da6aa2204f50,46569,1733406988224] 2024-12-05T13:56:31,076 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T13:56:31,087 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T13:56:31,092 INFO [RS:0;da6aa2204f50:46569 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T13:56:31,092 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:31,093 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T13:56:31,098 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T13:56:31,099 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-05T13:56:31,100 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:31,100 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:31,100 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:31,100 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:31,100 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:31,100 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/da6aa2204f50:0, corePoolSize=2, maxPoolSize=2 2024-12-05T13:56:31,101 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:31,101 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:31,101 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:31,101 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:31,101 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:31,101 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:31,101 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/da6aa2204f50:0, corePoolSize=3, maxPoolSize=3 2024-12-05T13:56:31,102 DEBUG [RS:0;da6aa2204f50:46569 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/da6aa2204f50:0, corePoolSize=3, maxPoolSize=3 2024-12-05T13:56:31,102 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:31,103 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:31,103 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:31,103 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-05T13:56:31,103 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:31,103 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,46569,1733406988224-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T13:56:31,118 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T13:56:31,120 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,46569,1733406988224-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:31,120 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:31,121 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.Replication(171): da6aa2204f50,46569,1733406988224 started 2024-12-05T13:56:31,137 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:31,137 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(1482): Serving as da6aa2204f50,46569,1733406988224, RpcServer on da6aa2204f50/172.17.0.2:46569, sessionid=0x101a71134240003 2024-12-05T13:56:31,138 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T13:56:31,138 DEBUG [RS:0;da6aa2204f50:46569 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager da6aa2204f50,46569,1733406988224 2024-12-05T13:56:31,138 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'da6aa2204f50,46569,1733406988224' 2024-12-05T13:56:31,139 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T13:56:31,140 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T13:56:31,140 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T13:56:31,140 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T13:56:31,140 DEBUG [RS:0;da6aa2204f50:46569 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager da6aa2204f50,46569,1733406988224 2024-12-05T13:56:31,140 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'da6aa2204f50,46569,1733406988224' 2024-12-05T13:56:31,141 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T13:56:31,141 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T13:56:31,142 DEBUG [RS:0;da6aa2204f50:46569 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T13:56:31,142 INFO [RS:0;da6aa2204f50:46569 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T13:56:31,142 INFO [RS:0;da6aa2204f50:46569 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-05T13:56:31,252 INFO [RS:0;da6aa2204f50:46569 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:56:31,255 INFO [RS:0;da6aa2204f50:46569 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=da6aa2204f50%2C46569%2C1733406988224, suffix=, logDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/WALs/da6aa2204f50,46569,1733406988224, archiveDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/oldWALs, maxLogs=32 2024-12-05T13:56:31,272 DEBUG [RS:0;da6aa2204f50:46569 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/WALs/da6aa2204f50,46569,1733406988224/da6aa2204f50%2C46569%2C1733406988224.1733406991258, exclude list is [], retry=0 2024-12-05T13:56:31,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43231,DS-3dfab4d5-5bae-4d95-9e3c-7448b7c77506,DISK] 2024-12-05T13:56:31,279 INFO [RS:0;da6aa2204f50:46569 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/WALs/da6aa2204f50,46569,1733406988224/da6aa2204f50%2C46569%2C1733406988224.1733406991258 2024-12-05T13:56:31,280 DEBUG [RS:0;da6aa2204f50:46569 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36253:36253)] 2024-12-05T13:56:31,394 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T13:56:31,395 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a 2024-12-05T13:56:31,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741833_1009 (size=32) 2024-12-05T13:56:31,814 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:56:31,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T13:56:31,822 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T13:56:31,822 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:31,823 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:56:31,823 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T13:56:31,825 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T13:56:31,825 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:31,826 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:56:31,826 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T13:56:31,829 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T13:56:31,829 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:31,829 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:56:31,830 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T13:56:31,832 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T13:56:31,832 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:31,833 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:56:31,833 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T13:56:31,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/1588230740 
2024-12-05T13:56:31,835 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/1588230740 2024-12-05T13:56:31,838 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T13:56:31,838 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T13:56:31,839 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T13:56:31,841 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T13:56:31,845 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T13:56:31,846 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72358914, jitterRate=0.07823184132575989}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T13:56:31,848 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733406991815Initializing all the Stores at 1733406991818 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406991818Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406991818Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406991818Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406991818Cleaning up temporary data from old regions at 1733406991838 (+20 ms)Region opened successfully at 1733406991848 (+10 ms) 2024-12-05T13:56:31,848 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T13:56:31,848 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T13:56:31,848 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): 
Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T13:56:31,848 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T13:56:31,848 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T13:56:31,849 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T13:56:31,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733406991848Disabling compacts and flushes for region at 1733406991848Disabling writes for close at 1733406991848Writing region close event to WAL at 1733406991849 (+1 ms)Closed at 1733406991849 2024-12-05T13:56:31,852 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T13:56:31,852 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T13:56:31,857 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T13:56:31,864 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T13:56:31,867 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T13:56:32,022 DEBUG [da6aa2204f50:33783 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-05T13:56:32,035 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=da6aa2204f50,46569,1733406988224 2024-12-05T13:56:32,040 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as da6aa2204f50,46569,1733406988224, state=OPENING 2024-12-05T13:56:32,084 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T13:56:32,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:32,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:32,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:32,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-05T13:56:32,094 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:32,094 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:32,095 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:32,095 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:32,099 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T13:56:32,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=da6aa2204f50,46569,1733406988224}] 2024-12-05T13:56:32,285 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T13:56:32,288 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39211, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T13:56:32,298 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T13:56:32,298 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:56:32,299 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-05T13:56:32,302 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=da6aa2204f50%2C46569%2C1733406988224.meta, suffix=.meta, logDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/WALs/da6aa2204f50,46569,1733406988224, archiveDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/oldWALs, maxLogs=32 2024-12-05T13:56:32,317 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/WALs/da6aa2204f50,46569,1733406988224/da6aa2204f50%2C46569%2C1733406988224.meta.1733406992303.meta, exclude list is [], retry=0 2024-12-05T13:56:32,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43231,DS-3dfab4d5-5bae-4d95-9e3c-7448b7c77506,DISK] 2024-12-05T13:56:32,324 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/WALs/da6aa2204f50,46569,1733406988224/da6aa2204f50%2C46569%2C1733406988224.meta.1733406992303.meta 2024-12-05T13:56:32,325 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36253:36253)] 2024-12-05T13:56:32,325 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:56:32,327 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T13:56:32,329 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T13:56:32,333 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-05T13:56:32,337 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T13:56:32,338 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:56:32,338 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T13:56:32,338 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T13:56:32,342 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T13:56:32,343 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T13:56:32,343 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:32,344 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:56:32,344 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T13:56:32,346 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T13:56:32,346 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:32,347 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:56:32,347 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T13:56:32,348 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T13:56:32,348 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:32,349 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:56:32,349 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T13:56:32,351 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T13:56:32,351 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:32,352 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:56:32,352 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T13:56:32,353 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/1588230740 2024-12-05T13:56:32,356 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/1588230740 2024-12-05T13:56:32,358 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T13:56:32,359 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T13:56:32,360 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-05T13:56:32,362 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T13:56:32,364 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61241934, jitterRate=-0.08742406964302063}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T13:56:32,364 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T13:56:32,366 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733406992339Writing region info on filesystem at 1733406992339Initializing all the Stores at 1733406992341 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406992341Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406992341Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406992341Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406992341Cleaning up temporary data from old regions at 1733406992359 (+18 ms)Running coprocessor post-open hooks at 1733406992364 (+5 ms)Region opened successfully at 1733406992366 (+2 ms) 2024-12-05T13:56:32,373 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733406992274 2024-12-05T13:56:32,384 DEBUG [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T13:56:32,385 INFO [RS_OPEN_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T13:56:32,386 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=da6aa2204f50,46569,1733406988224 2024-12-05T13:56:32,388 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as da6aa2204f50,46569,1733406988224, state=OPEN 2024-12-05T13:56:32,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:56:32,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:56:32,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:56:32,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:56:32,397 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:32,397 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:32,397 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:32,397 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:32,398 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=da6aa2204f50,46569,1733406988224 2024-12-05T13:56:32,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T13:56:32,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=da6aa2204f50,46569,1733406988224 in 297 msec 2024-12-05T13:56:32,409 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T13:56:32,410 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 547 msec 2024-12-05T13:56:32,411 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T13:56:32,411 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T13:56:32,429 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T13:56:32,430 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=da6aa2204f50,46569,1733406988224, seqNum=-1] 2024-12-05T13:56:32,451 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T13:56:32,453 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34883, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T13:56:32,477 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.5810 sec 2024-12-05T13:56:32,477 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733406992477, completionTime=-1 2024-12-05T13:56:32,480 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-05T13:56:32,480 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-05T13:56:32,509 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-05T13:56:32,509 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733407052509 2024-12-05T13:56:32,510 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733407112510 2024-12-05T13:56:32,510 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 29 msec 2024-12-05T13:56:32,513 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,33783,1733406987455-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:32,513 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,33783,1733406987455-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:32,513 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,33783,1733406987455-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:32,515 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-da6aa2204f50:33783, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:32,516 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:32,516 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-05T13:56:32,522 DEBUG [master/da6aa2204f50:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T13:56:32,541 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 4.166sec 2024-12-05T13:56:32,542 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T13:56:32,543 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T13:56:32,544 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T13:56:32,544 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-05T13:56:32,545 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T13:56:32,545 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,33783,1733406987455-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T13:56:32,546 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,33783,1733406987455-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T13:56:32,555 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T13:56:32,556 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T13:56:32,556 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,33783,1733406987455-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T13:56:32,617 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54c2d4b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T13:56:32,619 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-05T13:56:32,619 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-05T13:56:32,621 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request da6aa2204f50,44809,-1 for getting cluster id 2024-12-05T13:56:32,624 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T13:56:32,635 DEBUG [HMaster-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'df4662d0-9548-4c10-a6a7-e701c5555560' 2024-12-05T13:56:32,638 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T13:56:32,638 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "df4662d0-9548-4c10-a6a7-e701c5555560" 2024-12-05T13:56:32,640 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79617c06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T13:56:32,640 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [da6aa2204f50,33783,-1, da6aa2204f50,46553,-1, da6aa2204f50,44809,-1] 2024-12-05T13:56:32,642 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T13:56:32,644 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T13:56:32,645 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:56:32,646 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41476, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T13:56:32,647 INFO [HMaster-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50778, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T13:56:32,649 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13b392dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T13:56:32,650 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T13:56:32,652 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T13:56:32,656 INFO [HMaster-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:34718, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T13:56:32,659 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,46569,1733406988224, seqNum=-1] 2024-12-05T13:56:32,659 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T13:56:32,662 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41998, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T13:56:32,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=da6aa2204f50,33783,1733406987455 2024-12-05T13:56:32,680 INFO [Time-limited test {}] hbase.SingleProcessHBaseCluster(618): Stopping Thread[M:0;da6aa2204f50:33783,5,FailOnTimeoutGroup] 2024-12-05T13:56:32,681 INFO [Time-limited test {}] master.HMaster(3321): ***** STOPPING master 'da6aa2204f50,33783,1733406987455' ***** 2024-12-05T13:56:32,681 INFO [Time-limited test {}] master.HMaster(3323): STOPPED: Stopping master 0 2024-12-05T13:56:32,681 INFO [M:0;da6aa2204f50:33783 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T13:56:32,681 INFO [M:0;da6aa2204f50:33783 {}] client.AsyncConnectionImpl(233): Connection has been closed by M:0;da6aa2204f50:33783. 2024-12-05T13:56:32,681 DEBUG [M:0;da6aa2204f50:33783 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:630) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T13:56:32,686 DEBUG [M:0;da6aa2204f50:33783 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:56:32,686 INFO [M:0;da6aa2204f50:33783 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T13:56:32,686 DEBUG [M:0;da6aa2204f50:33783 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T13:56:32,686 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-05T13:56:32,686 DEBUG [master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.large.0-1733406990986 {}] cleaner.HFileCleaner(306): Exit Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.large.0-1733406990986,5,FailOnTimeoutGroup] 2024-12-05T13:56:32,686 DEBUG [master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.small.0-1733406990987 {}] cleaner.HFileCleaner(306): Exit Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.small.0-1733406990987,5,FailOnTimeoutGroup] 2024-12-05T13:56:32,687 DEBUG [M:0;da6aa2204f50:33783 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T13:56:32,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:32,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:32,705 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T13:56:32,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:32,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:32,707 INFO [M:0;da6aa2204f50:33783 {}] hbase.ChoreService(370): Chore service for: master/da6aa2204f50:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T13:56:32,707 INFO [M:0;da6aa2204f50:33783 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T13:56:32,707 DEBUG [M:0;da6aa2204f50:33783 {}] master.HMaster(1795): Stopping service threads 2024-12-05T13:56:32,707 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T13:56:32,707 DEBUG [zk-event-processor-pool-0 {}] master.ActiveMasterManager(204): No master available. 
Notifying waiting threads 2024-12-05T13:56:32,707 INFO [M:0;da6aa2204f50:33783 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T13:56:32,708 INFO [M:0;da6aa2204f50:33783 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T13:56:32,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:32,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:32,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:32,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:32,709 INFO [Time-limited test {}] hbase.LocalHBaseCluster(362): Waiting on da6aa2204f50,33783,1733406987455 2024-12-05T13:56:32,709 INFO [M:0;da6aa2204f50:33783 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T13:56:32,709 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T13:56:32,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:32,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:32,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:32,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:32,718 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T13:56:32,718 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T13:56:32,718 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T13:56:32,719 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/da6aa2204f50,46553,1733406988083 from backup master directory 2024-12-05T13:56:32,719 INFO [M:0;da6aa2204f50:33783 {}] 
master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/.lastflushedseqids 2024-12-05T13:56:32,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:32,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:32,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:32,726 WARN [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T13:56:32,726 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=da6aa2204f50,46553,1733406988083 2024-12-05T13:56:32,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/da6aa2204f50,46553,1733406988083 2024-12-05T13:56:32,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:32,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741835_1011 (size=99) 2024-12-05T13:56:32,753 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:32,753 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T13:56:32,768 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=1) cost 15ms. 2024-12-05T13:56:32,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741836_1012 (size=196) 2024-12-05T13:56:33,143 INFO [M:0;da6aa2204f50:33783 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T13:56:33,143 INFO [M:0;da6aa2204f50:33783 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T13:56:33,144 DEBUG [M:0;da6aa2204f50:33783 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T13:56:33,144 INFO [M:0;da6aa2204f50:33783 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T13:56:33,144 DEBUG [M:0;da6aa2204f50:33783 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:56:33,144 DEBUG [M:0;da6aa2204f50:33783 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T13:56:33,144 DEBUG [M:0;da6aa2204f50:33783 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:56:33,151 INFO [M:0;da6aa2204f50:33783 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.62 KB heapSize=11.22 KB 2024-12-05T13:56:33,184 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T13:56:33,184 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T13:56:33,185 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:56:33,196 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(342): old store file tracker DEFAULT is the same with new store file tracker, skip migration 2024-12-05T13:56:33,202 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(316): Renamed hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455 to hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455-dead as it is dead 2024-12-05T13:56:33,204 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-05T13:56:33,204 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-05T13:56:33,204 INFO 
[master/da6aa2204f50:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455-dead/da6aa2204f50%2C33783%2C1733406987455.1733406990336 2024-12-05T13:56:33,207 WARN [IPC Server handler 2 on default port 44371 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455-dead/da6aa2204f50%2C33783%2C1733406987455.1733406990336 has not been closed. Lease recovery is in progress. RecoveryId = 1013 for block blk_1073741830_1006 2024-12-05T13:56:33,209 DEBUG [M:0;da6aa2204f50:33783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/623d87d7230f45e482a98e4622b84c81 is 82, key is hbase:meta,,1/info:regioninfo/1733406992385/Put/seqid=0 2024-12-05T13:56:33,211 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455-dead/da6aa2204f50%2C33783%2C1733406987455.1733406990336 after 5ms 2024-12-05T13:56:33,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741837_1014 (size=5672) 2024-12-05T13:56:33,620 INFO [M:0;da6aa2204f50:33783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/623d87d7230f45e482a98e4622b84c81 2024-12-05T13:56:33,671 DEBUG [M:0;da6aa2204f50:33783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ac471bf175054fb9805c08d91d6d038f is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733406992474/Put/seqid=0 2024-12-05T13:56:33,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741838_1015 (size=5275) 2024-12-05T13:56:34,077 INFO [M:0;da6aa2204f50:33783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ac471bf175054fb9805c08d91d6d038f 2024-12-05T13:56:34,105 DEBUG [M:0;da6aa2204f50:33783 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b0d6aa7724a44e96af3759028db5faf1 is 69, key is da6aa2204f50,46569,1733406988224/rs:state/1733406991003/Put/seqid=0 2024-12-05T13:56:34,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741839_1016 (size=5156) 2024-12-05T13:56:34,512 INFO [M:0;da6aa2204f50:33783 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 
B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b0d6aa7724a44e96af3759028db5faf1 2024-12-05T13:56:34,527 DEBUG [M:0;da6aa2204f50:33783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/623d87d7230f45e482a98e4622b84c81 as hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/623d87d7230f45e482a98e4622b84c81 2024-12-05T13:56:34,537 INFO [M:0;da6aa2204f50:33783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/623d87d7230f45e482a98e4622b84c81, entries=8, sequenceid=28, filesize=5.5 K 2024-12-05T13:56:34,540 DEBUG [M:0;da6aa2204f50:33783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ac471bf175054fb9805c08d91d6d038f as hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ac471bf175054fb9805c08d91d6d038f 2024-12-05T13:56:34,549 INFO [M:0;da6aa2204f50:33783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ac471bf175054fb9805c08d91d6d038f, entries=3, sequenceid=28, filesize=5.2 K 2024-12-05T13:56:34,551 DEBUG [M:0;da6aa2204f50:33783 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b0d6aa7724a44e96af3759028db5faf1 as hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b0d6aa7724a44e96af3759028db5faf1 2024-12-05T13:56:34,560 INFO [M:0;da6aa2204f50:33783 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b0d6aa7724a44e96af3759028db5faf1, entries=1, sequenceid=28, filesize=5.0 K 2024-12-05T13:56:34,566 INFO [M:0;da6aa2204f50:33783 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.62 KB/7802, heapSize ~10.92 KB/11184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1416ms, sequenceid=28, compaction requested=false 2024-12-05T13:56:34,569 INFO [M:0;da6aa2204f50:33783 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T13:56:34,569 DEBUG [M:0;da6aa2204f50:33783 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733406993144Disabling compacts and flushes for region at 1733406993144Disabling writes for close at 1733406993144Obtaining lock to block concurrent updates at 1733406993151 (+7 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733406993151Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7802, getHeapSize=11424, getOffHeapSize=0, getCellsCount=35 at 1733406993159 (+8 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733406993161 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733406993162 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733406993197 (+35 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733406993200 (+3 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733406993656 (+456 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733406993670 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733406993670Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733406994088 (+418 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733406994104 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733406994105 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1376262: reopening flushed file at 1733406994525 (+420 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27cfbad2: reopening flushed file at 1733406994538 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d1a1187: reopening flushed file at 1733406994549 (+11 ms)Finished flush of dataSize ~7.62 KB/7802, heapSize ~10.92 KB/11184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1416ms, sequenceid=28, compaction requested=false at 1733406994566 (+17 ms)Writing region close event to WAL at 1733406994569 (+3 ms)Closed at 1733406994569 2024-12-05T13:56:34,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741830_1006 (size=10165) 2024-12-05T13:56:34,576 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.io.IOException: Commit or complete block blk_1073741830_1006, whereas it is under recovery. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.commitOrCompleteLastBlock(BlockManager.java:1211) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.commitOrCompleteLastBlock(FSNamesystem.java:3890) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:731) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:163) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: Commit or complete block blk_1073741830_1006, whereas it is under recovery. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.commitOrCompleteLastBlock(BlockManager.java:1211) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.commitOrCompleteLastBlock(FSNamesystem.java:3890) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:731) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy38.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy39.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 6 more 2024-12-05T13:56:34,577 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455/da6aa2204f50%2C33783%2C1733406987455.1733406990336 2024-12-05T13:56:34,579 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455/da6aa2204f50%2C33783%2C1733406987455.1733406990336 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:951) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455/da6aa2204f50%2C33783%2C1733406987455.1733406990336 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy38.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$recoverLease$24(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.recoverLease(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy39.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:949) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 17 more 2024-12-05T13:56:34,580 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455/da6aa2204f50%2C33783%2C1733406987455.1733406990336 2024-12-05T13:56:34,580 ERROR [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2033): Unable to recover lease after several attempts. Give up. 
java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455/da6aa2204f50%2C33783%2C1733406987455.1733406990336 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:951) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455/da6aa2204f50%2C33783%2C1733406987455.1733406990336 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy38.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$recoverLease$24(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.recoverLease(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy39.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor24.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:949) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 16 more 2024-12-05T13:56:34,582 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T13:56:34,582 INFO [M:0;da6aa2204f50:33783 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T13:56:34,582 INFO [M:0;da6aa2204f50:33783 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33783 2024-12-05T13:56:34,585 INFO [M:0;da6aa2204f50:33783 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T13:56:34,655 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server da6aa2204f50:33783 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: da6aa2204f50/172.17.0.2:33783 Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.newConnectException0(Errors.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:131) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Socket.finishConnect(Socket.java:359) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.doFinishConnect(AbstractEpollChannel.java:715) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:692) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:56:34,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.FailedServers(52): Added failed server with address da6aa2204f50:33783 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: da6aa2204f50/172.17.0.2:33783 2024-12-05T13:56:34,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:56:34,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33783-0x101a71134240000, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:56:34,728 INFO [M:0;da6aa2204f50:33783 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T13:56:34,760 DEBUG [RS:0;da6aa2204f50:46569 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T13:56:34,764 INFO [HMaster-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48655, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T13:56:34,765 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:34,890 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:34,997 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:35,103 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:35,211 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:35,319 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:35,427 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:35,533 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:35,639 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:35,747 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:35,854 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:35,963 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:36,071 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:36,178 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:36,283 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:36,390 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:36,496 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:36,604 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:36,713 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:36,820 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:36,929 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:37,039 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:37,072 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T13:56:37,149 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:37,212 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455-dead/da6aa2204f50%2C33783%2C1733406987455.1733406990336 after 4008ms 2024-12-05T13:56:37,213 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(328): Renamed hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455-dead/da6aa2204f50%2C33783%2C1733406987455.1733406990336 to hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/da6aa2204f50%2C33783%2C1733406987455.1733406990336 2024-12-05T13:56:37,213 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(330): Delete empty local region wal dir hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,33783,1733406987455-dead 2024-12-05T13:56:37,214 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,46553,1733406988083 2024-12-05T13:56:37,217 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:56:37,220 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=da6aa2204f50%2C46553%2C1733406988083, suffix=, logDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,46553,1733406988083, archiveDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/oldWALs, maxLogs=10 2024-12-05T13:56:37,232 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,46553,1733406988083/da6aa2204f50%2C46553%2C1733406988083.1733406997220, exclude list is [], retry=0 2024-12-05T13:56:37,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43231,DS-3dfab4d5-5bae-4d95-9e3c-7448b7c77506,DISK] 2024-12-05T13:56:37,238 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,46553,1733406988083/da6aa2204f50%2C46553%2C1733406988083.1733406997220 2024-12-05T13:56:37,239 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36253:36253)] 2024-12-05T13:56:37,239 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:56:37,239 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:56:37,239 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:37,239 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:37,242 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:37,243 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T13:56:37,243 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:37,253 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:37,253 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/623d87d7230f45e482a98e4622b84c81 2024-12-05T13:56:37,254 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T13:56:37,254 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:37,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T13:56:37,256 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:37,265 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ac471bf175054fb9805c08d91d6d038f 2024-12-05T13:56:37,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:56:37,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:37,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T13:56:37,267 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:37,277 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b0d6aa7724a44e96af3759028db5faf1 2024-12-05T13:56:37,277 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:56:37,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:37,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T13:56:37,280 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:37,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:56:37,281 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 
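Each store above logs its CompactionConfiguration as it opens: minFilesToCompact:3, maxFilesToCompact:10 and "ratio 1.200000" govern which store files the ExploringCompactionPolicy will group into one compaction. The core size-ratio test is that a file is only eligible if its size is at most the combined size of the other candidates times the ratio; the Java sketch below illustrates that single check under simplified assumptions (the real policy also considers off-peak ratios, file counts and total selection size, all omitted here, and the class and method names are invented for the example).

import java.util.List;

// Simplified sketch of the size-ratio eligibility test used in ratio-based
// compaction selection; not the full ExploringCompactionPolicy algorithm.
public final class CompactionRatioSketch {
    static final double RATIO = 1.2;            // "ratio 1.200000" in the log
    static final int MIN_FILES_TO_COMPACT = 3;  // "minFilesToCompact:3"

    /** Returns true if every file in the candidate selection passes the ratio test. */
    static boolean passesRatioTest(List<Long> fileSizes) {
        if (fileSizes.size() < MIN_FILES_TO_COMPACT) {
            return false;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            long sumOfOthers = total - size;
            if (size > sumOfOthers * RATIO) {
                return false; // one file dominates the selection, so skip it
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(passesRatioTest(List.of(10L, 12L, 15L)));  // true: similarly sized files
        System.out.println(passesRatioTest(List.of(10L, 12L, 500L))); // false: 500 >> 1.2 * (10 + 12)
    }
}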
2024-12-05T13:56:37,282 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(5516): Found 1 recovered edits file(s) under hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals 2024-12-05T13:56:37,282 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/da6aa2204f50%2C33783%2C1733406987455.1733406990336 2024-12-05T13:56:37,295 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(5793): Applied 0, skipped 37, firstSequenceIdInLog=3, maxSequenceIdInLog=30, path=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/da6aa2204f50%2C33783%2C1733406987455.1733406990336 2024-12-05T13:56:37,296 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/da6aa2204f50%2C33783%2C1733406987455.1733406990336 2024-12-05T13:56:37,299 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:37,299 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:37,300 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-05T13:56:37,302 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:37,306 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-12-05T13:56:37,308 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=31; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70619462, jitterRate=0.052311986684799194}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T13:56:37,309 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733406997239Initializing all the Stores at 1733406997241 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406997241Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406997241Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406997241Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406997241Cleaning up temporary data from old regions at 1733406997299 (+58 ms)Region opened successfully at 1733406997309 (+10 ms) 2024-12-05T13:56:37,309 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T13:56:37,310 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21332020, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=da6aa2204f50/172.17.0.2:0 2024-12-05T13:56:37,319 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(884): The info family in master local region already has data in it, skip migrating... 
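The replay summary just above, "Applied 0, skipped 37, firstSequenceIdInLog=3, maxSequenceIdInLog=30", means every edit in the recovered WAL carried a sequence id at or below what the store files already contain, so nothing had to be re-applied, and the region reopens with "next sequenceid=31" (maxSequenceIdInLog + 1). The following Java sketch shows that sequence-id filtering idea with made-up record and method names; it is only an illustration of the bookkeeping, not HBase's HRegion replay code.

import java.util.List;

// Illustrative sketch of sequence-id based WAL replay filtering.
public final class WalReplaySketch {
    record WalEdit(long sequenceId, String payload) {}

    static long replay(List<WalEdit> recoveredEdits, long maxFlushedSeqId) {
        long applied = 0, skipped = 0, maxSeqIdInLog = 0;
        for (WalEdit edit : recoveredEdits) {
            maxSeqIdInLog = Math.max(maxSeqIdInLog, edit.sequenceId());
            if (edit.sequenceId() <= maxFlushedSeqId) {
                skipped++;   // already persisted in a store file, nothing to do
            } else {
                applied++;   // would be re-applied to the memstore
            }
        }
        System.out.printf("Applied %d, skipped %d, maxSequenceIdInLog=%d%n",
                applied, skipped, maxSeqIdInLog);
        return maxSeqIdInLog + 1; // the region's next sequence id
    }

    public static void main(String[] args) {
        List<WalEdit> edits =
                List.of(new WalEdit(3, "a"), new WalEdit(17, "b"), new WalEdit(30, "c"));
        // All edits are at or below the flushed sequence id, so everything is skipped.
        System.out.println("next sequenceid=" + replay(edits, 30));
    }
}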
2024-12-05T13:56:37,320 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T13:56:37,320 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T13:56:37,320 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T13:56:37,321 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-05T13:56:37,325 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T13:56:37,325 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-12-05T13:56:37,325 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T13:56:37,329 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.RegionStateStore(171): Load hbase:meta entry region=1588230740, regionState=OPEN, lastHost=da6aa2204f50,46569,1733406988224, regionLocation=da6aa2204f50,46569,1733406988224, openSeqNum=2 2024-12-05T13:56:37,329 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(349): Loaded hbase:meta state=OPEN, location=da6aa2204f50,46569,1733406988224, table=hbase:meta, region=1588230740 2024-12-05T13:56:37,330 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as da6aa2204f50,46569,1733406988224, state=OPEN 2024-12-05T13:56:37,356 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:37,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:56:37,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:56:37,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:56:37,376 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:37,376 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:37,376 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:37,384 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 1 possibly 'live' servers, and 0 'splitting'. 
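The ServerNotRunningYetException DEBUG entries repeated throughout this startup sequence, arriving roughly every 100 ms, show the region server calling regionServerReport while HMaster.checkServiceStarted still rejects it, and simply trying again until the master becomes active. The Java sketch below shows that kind of bounded retry loop in isolation; reportToMaster, MAX_ATTEMPTS and RETRY_INTERVAL_MS are illustrative placeholders, not the actual HBase implementation.

import java.util.concurrent.TimeUnit;

// Stand-alone sketch: retry an RPC-style call until it stops failing, mirroring
// the ~100 ms report/reject cadence visible in the log above.
public final class ReportRetrySketch {
    private static final long RETRY_INTERVAL_MS = 100;
    private static final int MAX_ATTEMPTS = 50;

    private static int calls = 0;

    // Placeholder for the report RPC; fails until the hypothetical master has
    // "started" (here, after five calls).
    static void reportToMaster() throws Exception {
        if (++calls < 5) {
            throw new Exception("Server is not running yet");
        }
    }

    public static void main(String[] args) throws InterruptedException {
        for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
            try {
                reportToMaster();
                System.out.println("report accepted on attempt " + attempt);
                return;
            } catch (Exception serverNotRunningYet) {
                // Master not ready yet; wait and try again, as the region server does.
                TimeUnit.MILLISECONDS.sleep(RETRY_INTERVAL_MS);
            }
        }
        System.out.println("gave up after " + MAX_ATTEMPTS + " attempts");
    }
}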
2024-12-05T13:56:37,386 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ServerManager(517): Registering regionserver=da6aa2204f50,46569,1733406988224 2024-12-05T13:56:37,387 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T13:56:37,397 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T13:56:37,397 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T13:56:37,398 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T13:56:37,408 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T13:56:37,409 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T13:56:37,411 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T13:56:37,417 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T13:56:37,419 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T13:56:37,425 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T13:56:37,431 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T13:56:37,442 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T13:56:37,443 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=da6aa2204f50,46553,1733406988083, sessionid=0x101a71134240001, setting cluster-up flag (Was=true) 2024-12-05T13:56:37,454 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T13:56:37,458 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=da6aa2204f50,46553,1733406988083 2024-12-05T13:56:37,469 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, 
/hbase/online-snapshot/abort 2024-12-05T13:56:37,471 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=da6aa2204f50,46553,1733406988083 2024-12-05T13:56:37,474 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ServerManager(1190): begin to load .lastflushedseqids at hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/.lastflushedseqids 2024-12-05T13:56:37,478 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46553 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:37,479 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T13:56:37,479 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-05T13:56:37,480 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: da6aa2204f50,46553,1733406988083 Number of backup masters: 1 da6aa2204f50,44809,1733406988128 Number of live region servers: 1 da6aa2204f50,46569,1733406988224 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T13:56:37,484 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:56:37,484 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:56:37,484 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:56:37,484 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:56:37,484 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/da6aa2204f50:0, corePoolSize=10, maxPoolSize=10 2024-12-05T13:56:37,485 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:37,485 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/da6aa2204f50:0, corePoolSize=2, maxPoolSize=2 2024-12-05T13:56:37,485 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:37,486 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733407027486 2024-12-05T13:56:37,486 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T13:56:37,486 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T13:56:37,487 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T13:56:37,487 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T13:56:37,487 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T13:56:37,487 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T13:56:37,487 INFO 
[master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:37,488 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T13:56:37,488 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T13:56:37,488 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T13:56:37,488 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T13:56:37,488 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T13:56:37,489 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.large.0-1733406997488,5,FailOnTimeoutGroup] 2024-12-05T13:56:37,489 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.small.0-1733406997489,5,FailOnTimeoutGroup] 2024-12-05T13:56:37,489 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:37,489 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T13:56:37,489 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:37,489 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:37,490 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733406997489, completionTime=-1 2024-12-05T13:56:37,490 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-05T13:56:37,490 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
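The ChoreService entries above show the master scheduling its periodic background chores: LogsCleaner and HFileCleaner every 600000 ms, SnapshotCleaner every 1800000 ms, ReplicationBarrierCleaner every 43200000 ms, and so on. Conceptually this is fixed-rate scheduling of named tasks; the small stand-alone Java sketch below illustrates the pattern with a plain ScheduledExecutorService rather than HBase's own ChoreService, and uses a shortened period so it finishes quickly.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustration of periodic "chore" scheduling; not HBase's ChoreService.
public final class ChoreSchedulingSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);

        // period=600000 ms in the log; shortened here so the example terminates.
        chores.scheduleAtFixedRate(
                () -> System.out.println("LogsCleaner chore ran"),
                0, 500, TimeUnit.MILLISECONDS);

        TimeUnit.SECONDS.sleep(2);  // let the chore fire a few times
        chores.shutdownNow();
    }
}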
2024-12-05T13:56:37,490 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T13:56:37,490 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,46569,1733406988224, seqNum=-1] 2024-12-05T13:56:37,491 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T13:56:37,493 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53659, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T13:56:37,495 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-05T13:56:37,495 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733407057495 2024-12-05T13:56:37,495 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733407117495 2024-12-05T13:56:37,495 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 5 msec 2024-12-05T13:56:37,496 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,46553,1733406988083-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:37,496 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,46553,1733406988083-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:37,496 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,46553,1733406988083-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:37,496 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-da6aa2204f50:46553, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:37,496 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:37,497 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:37,500 DEBUG [master/da6aa2204f50:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T13:56:37,504 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 4.778sec 2024-12-05T13:56:37,505 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T13:56:37,505 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 
2024-12-05T13:56:37,505 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T13:56:37,505 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-05T13:56:37,505 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T13:56:37,505 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,46553,1733406988083-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T13:56:37,505 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,46553,1733406988083-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T13:56:37,509 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T13:56:37,509 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T13:56:37,509 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,46553,1733406988083-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:37,538 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-05T13:56:37,539 INFO [Time-limited test {}] hbase.SingleProcessHBaseCluster(618): Stopping Thread[M:1;da6aa2204f50:46553,5,FailOnTimeoutGroup] 2024-12-05T13:56:37,539 INFO [Time-limited test {}] master.HMaster(3321): ***** STOPPING master 'da6aa2204f50,46553,1733406988083' ***** 2024-12-05T13:56:37,539 INFO [Time-limited test {}] master.HMaster(3323): STOPPED: Stopping master 0 2024-12-05T13:56:37,539 INFO [M:1;da6aa2204f50:46553 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T13:56:37,539 INFO [M:1;da6aa2204f50:46553 {}] client.AsyncConnectionImpl(233): Connection has been closed by M:1;da6aa2204f50:46553. 
2024-12-05T13:56:37,539 DEBUG [M:1;da6aa2204f50:46553 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:630)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-05T13:56:37,539 DEBUG [M:1;da6aa2204f50:46553 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:56:37,540 INFO [M:1;da6aa2204f50:46553 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T13:56:37,540 DEBUG [M:1;da6aa2204f50:46553 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T13:56:37,540 DEBUG [M:1;da6aa2204f50:46553 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T13:56:37,540 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-05T13:56:37,540 DEBUG [master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.small.0-1733406997489 {}] cleaner.HFileCleaner(306): Exit Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.small.0-1733406997489,5,FailOnTimeoutGroup] 2024-12-05T13:56:37,540 DEBUG [master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.large.0-1733406997488 {}] cleaner.HFileCleaner(306): Exit Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.large.0-1733406997488,5,FailOnTimeoutGroup] 2024-12-05T13:56:37,540 INFO [M:1;da6aa2204f50:46553 {}] hbase.ChoreService(370): Chore service for: master/da6aa2204f50:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T13:56:37,541 INFO [M:1;da6aa2204f50:46553 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T13:56:37,541 DEBUG [M:1;da6aa2204f50:46553 {}] master.HMaster(1795): Stopping service threads 2024-12-05T13:56:37,541 INFO [M:1;da6aa2204f50:46553 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T13:56:37,541 INFO [M:1;da6aa2204f50:46553 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T13:56:37,541 INFO [M:1;da6aa2204f50:46553 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T13:56:37,542 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-05T13:56:37,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:37,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:37,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:37,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:37,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:37,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:37,550 INFO [Time-limited test {}] hbase.LocalHBaseCluster(362): Waiting on da6aa2204f50,46553,1733406988083 2024-12-05T13:56:37,550 DEBUG [M:1;da6aa2204f50:46553 {}] zookeeper.ZKUtil(347): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T13:56:37,550 WARN [M:1;da6aa2204f50:46553 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T13:56:37,551 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T13:56:37,551 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-05T13:56:37,551 DEBUG [zk-event-processor-pool-0 {}] master.ActiveMasterManager(204): No master available. 
Notifying waiting threads 2024-12-05T13:56:37,552 INFO [M:1;da6aa2204f50:46553 {}] master.ServerManager(1134): Rewriting .lastflushedseqids file at: hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/.lastflushedseqids 2024-12-05T13:56:37,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:37,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:37,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:37,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:37,560 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T13:56:37,560 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-05T13:56:37,561 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/da6aa2204f50,44809,1733406988128 from backup master directory 2024-12-05T13:56:37,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741841_1018 (size=99) 2024-12-05T13:56:37,563 INFO [M:1;da6aa2204f50:46553 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T13:56:37,563 INFO [M:1;da6aa2204f50:46553 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T13:56:37,563 DEBUG [M:1;da6aa2204f50:46553 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T13:56:37,563 INFO [M:1;da6aa2204f50:46553 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:56:37,563 DEBUG [M:1;da6aa2204f50:46553 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:56:37,563 DEBUG [M:1;da6aa2204f50:46553 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T13:56:37,563 DEBUG [M:1;da6aa2204f50:46553 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T13:56:37,563 INFO [M:1;da6aa2204f50:46553 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=48 B heapSize=1.12 KB 2024-12-05T13:56:37,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/da6aa2204f50,44809,1733406988128 2024-12-05T13:56:37,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:37,572 WARN [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T13:56:37,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:37,572 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=da6aa2204f50,44809,1733406988128 2024-12-05T13:56:37,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-05T13:56:37,583 DEBUG [M:1;da6aa2204f50:46553 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1d9464d9fb90466aa0ef6376f5aa385a is 52, key is load_balancer_on/state:d/1733406997535/Put/seqid=0 2024-12-05T13:56:37,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741842_1019 (size=5056) 2024-12-05T13:56:37,590 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T13:56:37,590 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T13:56:37,599 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=1) cost 9ms. 
2024-12-05T13:56:37,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741843_1020 (size=196) 2024-12-05T13:56:37,850 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-05T13:56:37,891 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T13:56:37,891 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-05T13:56:37,892 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T13:56:37,892 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-05T13:56:37,893 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-05T13:56:37,893 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-05T13:56:37,991 INFO [M:1;da6aa2204f50:46553 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1d9464d9fb90466aa0ef6376f5aa385a 2024-12-05T13:56:38,006 DEBUG [M:1;da6aa2204f50:46553 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1d9464d9fb90466aa0ef6376f5aa385a as hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1d9464d9fb90466aa0ef6376f5aa385a 2024-12-05T13:56:38,015 INFO [M:1;da6aa2204f50:46553 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1d9464d9fb90466aa0ef6376f5aa385a, entries=1, sequenceid=33, filesize=4.9 K 2024-12-05T13:56:38,016 INFO [M:1;da6aa2204f50:46553 {}] regionserver.HRegion(3140): Finished flush of dataSize ~48 B/48, heapSize ~360 B/360, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 453ms, sequenceid=33, compaction requested=false 2024-12-05T13:56:38,017 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T13:56:38,018 INFO [M:1;da6aa2204f50:46553 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:56:38,018 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T13:56:38,018 DEBUG [M:1;da6aa2204f50:46553 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733406997563Disabling compacts and flushes for region at 1733406997563Disabling writes for close at 1733406997563Obtaining lock to block concurrent updates at 1733406997563Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733406997563Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=48, getHeapSize=1080, getOffHeapSize=0, getCellsCount=1 at 1733406997564 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733406997565 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733406997565Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733406997582 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733406997582Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10cf0def: reopening flushed file at 1733406998005 (+423 ms)Finished flush of dataSize ~48 B/48, heapSize ~360 B/360, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 453ms, sequenceid=33, compaction requested=false at 1733406998016 (+11 ms)Writing region close event to WAL at 1733406998018 (+2 ms)Closed at 1733406998018 2024-12-05T13:56:38,018 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T13:56:38,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741840_1017 (size=767) 2024-12-05T13:56:38,022 INFO [M:1;da6aa2204f50:46553 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-05T13:56:38,022 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T13:56:38,023 INFO [M:1;da6aa2204f50:46553 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46553 2024-12-05T13:56:38,023 INFO [M:1;da6aa2204f50:46553 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T13:56:38,027 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(342): old store file tracker DEFAULT is the same with new store file tracker, skip migration 2024-12-05T13:56:38,030 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(316): Renamed hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,46553,1733406988083 to hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,46553,1733406988083-dead as it is dead 2024-12-05T13:56:38,030 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,46553,1733406988083-dead/da6aa2204f50%2C46553%2C1733406988083.1733406997220 2024-12-05T13:56:38,031 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,46553,1733406988083-dead/da6aa2204f50%2C46553%2C1733406988083.1733406997220 after 1ms 2024-12-05T13:56:38,032 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(328): Renamed hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,46553,1733406988083-dead/da6aa2204f50%2C46553%2C1733406988083.1733406997220 to hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/da6aa2204f50%2C46553%2C1733406988083.1733406997220 2024-12-05T13:56:38,032 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(330): Delete empty local region wal dir hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,46553,1733406988083-dead 2024-12-05T13:56:38,032 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,44809,1733406988128 2024-12-05T13:56:38,034 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T13:56:38,036 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=da6aa2204f50%2C44809%2C1733406988128, suffix=, logDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,44809,1733406988128, archiveDir=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/oldWALs, maxLogs=10 2024-12-05T13:56:38,050 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,44809,1733406988128/da6aa2204f50%2C44809%2C1733406988128.1733406998036, exclude list is [], retry=0 2024-12-05T13:56:38,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43231,DS-3dfab4d5-5bae-4d95-9e3c-7448b7c77506,DISK] 2024-12-05T13:56:38,056 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/WALs/da6aa2204f50,44809,1733406988128/da6aa2204f50%2C44809%2C1733406988128.1733406998036 2024-12-05T13:56:38,056 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36253:36253)] 2024-12-05T13:56:38,056 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T13:56:38,056 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T13:56:38,057 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:38,057 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:38,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:38,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T13:56:38,061 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:38,069 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/623d87d7230f45e482a98e4622b84c81 2024-12-05T13:56:38,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, 
compression=NONE 2024-12-05T13:56:38,069 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:38,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T13:56:38,071 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:38,078 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ac471bf175054fb9805c08d91d6d038f 2024-12-05T13:56:38,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:56:38,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:38,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T13:56:38,080 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:38,087 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b0d6aa7724a44e96af3759028db5faf1 2024-12-05T13:56:38,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:56:38,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:38,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T13:56:38,089 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T13:56:38,097 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1d9464d9fb90466aa0ef6376f5aa385a 2024-12-05T13:56:38,097 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T13:56:38,097 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:38,098 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(5516): Found 1 recovered edits file(s) under hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals 2024-12-05T13:56:38,098 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/da6aa2204f50%2C46553%2C1733406988083.1733406997220 2024-12-05T13:56:38,101 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server da6aa2204f50:46553 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: 
finishConnect(..) failed: Connection refused: da6aa2204f50/172.17.0.2:46553 Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.newConnectException0(Errors.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:131) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Socket.finishConnect(Socket.java:359) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.doFinishConnect(AbstractEpollChannel.java:715) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:692) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T13:56:38,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.FailedServers(52): Added failed server with address da6aa2204f50:46553 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: da6aa2204f50/172.17.0.2:46553 2024-12-05T13:56:38,103 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(5793): Applied 0, skipped 3, firstSequenceIdInLog=32, maxSequenceIdInLog=35, path=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/da6aa2204f50%2C46553%2C1733406988083.1733406997220 2024-12-05T13:56:38,104 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/da6aa2204f50%2C46553%2C1733406988083.1733406997220 2024-12-05T13:56:38,106 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:38,106 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:38,107 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-05T13:56:38,108 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T13:56:38,111 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=30 2024-12-05T13:56:38,112 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64300489, jitterRate=-0.04184804856777191}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T13:56:38,113 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733406998057Initializing all the Stores at 1733406998058 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733406998059 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406998059Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406998059Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733406998059Cleaning up temporary data from old regions at 1733406998106 (+47 ms)Region opened successfully at 1733406998113 (+7 ms) 2024-12-05T13:56:38,113 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T13:56:38,114 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@112492ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=da6aa2204f50/172.17.0.2:0 2024-12-05T13:56:38,117 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(884): The info family in master local region already has data in it, skip migrating... 
2024-12-05T13:56:38,118 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T13:56:38,118 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T13:56:38,118 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T13:56:38,118 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-05T13:56:38,122 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T13:56:38,122 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-12-05T13:56:38,122 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T13:56:38,126 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.RegionStateStore(171): Load hbase:meta entry region=1588230740, regionState=OPEN, lastHost=da6aa2204f50,46569,1733406988224, regionLocation=da6aa2204f50,46569,1733406988224, openSeqNum=2 2024-12-05T13:56:38,126 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(349): Loaded hbase:meta state=OPEN, location=da6aa2204f50,46569,1733406988224, table=hbase:meta, region=1588230740 2024-12-05T13:56:38,126 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as da6aa2204f50,46569,1733406988224, state=OPEN 2024-12-05T13:56:38,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:56:38,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T13:56:38,134 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:38,134 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T13:56:38,139 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 1 possibly 'live' servers, and 0 'splitting'. 
2024-12-05T13:56:38,140 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ServerManager(517): Registering regionserver=da6aa2204f50,46569,1733406988224 2024-12-05T13:56:38,150 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T13:56:38,154 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T13:56:38,155 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T13:56:38,164 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T13:56:38,164 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T13:56:38,166 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T13:56:38,167 INFO [M:1;da6aa2204f50:46553 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T13:56:38,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:56:38,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46553-0x101a71134240001, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:56:38,175 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T13:56:38,177 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T13:56:38,184 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T13:56:38,190 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T13:56:38,200 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T13:56:38,201 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=da6aa2204f50,44809,1733406988128, sessionid=0x101a71134240002, setting cluster-up flag (Was=true) 2024-12-05T13:56:38,204 DEBUG [RS:0;da6aa2204f50:46569 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T13:56:38,208 INFO [HMaster-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56719, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, 
ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-05T13:56:38,210 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44809 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T13:56:38,210 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T13:56:38,212 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=da6aa2204f50,44809,1733406988128 2024-12-05T13:56:38,223 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T13:56:38,225 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=da6aa2204f50,44809,1733406988128 2024-12-05T13:56:38,227 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ServerManager(1190): begin to load .lastflushedseqids at hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/.lastflushedseqids 2024-12-05T13:56:38,232 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T13:56:38,232 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-05T13:56:38,232 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: da6aa2204f50,44809,1733406988128 Number of backup masters: 0 Number of live region servers: 1 da6aa2204f50,46569,1733406988224 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T13:56:38,235 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:56:38,235 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:56:38,236 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:56:38,236 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/da6aa2204f50:0, corePoolSize=5, maxPoolSize=5 2024-12-05T13:56:38,236 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/da6aa2204f50:0, corePoolSize=10, maxPoolSize=10 2024-12-05T13:56:38,236 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:38,236 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/da6aa2204f50:0, corePoolSize=2, maxPoolSize=2 2024-12-05T13:56:38,236 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/da6aa2204f50:0, corePoolSize=1, maxPoolSize=1 2024-12-05T13:56:38,238 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733407028237 2024-12-05T13:56:38,238 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T13:56:38,238 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T13:56:38,238 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T13:56:38,238 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T13:56:38,238 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T13:56:38,238 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-05T13:56:38,239 INFO 
[master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:38,239 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-05T13:56:38,239 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-05T13:56:38,239 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-05T13:56:38,240 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-05T13:56:38,240 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-05T13:56:38,240 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.large.0-1733406998240,5,FailOnTimeoutGroup] 2024-12-05T13:56:38,240 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.small.0-1733406998240,5,FailOnTimeoutGroup] 2024-12-05T13:56:38,241 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:38,241 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-05T13:56:38,241 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:38,241 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:38,241 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733406998241, completionTime=-1 2024-12-05T13:56:38,241 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-05T13:56:38,241 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-05T13:56:38,242 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T13:56:38,242 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=da6aa2204f50,46569,1733406988224, seqNum=-1] 2024-12-05T13:56:38,243 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T13:56:38,246 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37737, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T13:56:38,249 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-05T13:56:38,249 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733407058249 2024-12-05T13:56:38,249 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733407118249 2024-12-05T13:56:38,249 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 7 msec 2024-12-05T13:56:38,249 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,44809,1733406988128-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:38,250 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,44809,1733406988128-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:38,250 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,44809,1733406988128-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:38,250 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-da6aa2204f50:44809, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:38,250 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:38,250 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:38,253 DEBUG [master/da6aa2204f50:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T13:56:38,257 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.684sec 2024-12-05T13:56:38,257 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T13:56:38,257 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 
2024-12-05T13:56:38,257 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T13:56:38,257 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-05T13:56:38,257 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T13:56:38,257 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,44809,1733406988128-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T13:56:38,257 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,44809,1733406988128-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T13:56:38,260 DEBUG [master/da6aa2204f50:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T13:56:38,260 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T13:56:38,260 INFO [master/da6aa2204f50:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=da6aa2204f50,44809,1733406988128-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T13:56:38,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T13:56:38,268 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T13:56:38,268 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.master.TestMasterFailoverBalancerPersistence.testMasterFailoverBalancerPersistence(TestMasterFailoverBalancerPersistence.java:81) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T13:56:38,268 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:56:38,269 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:56:38,269 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
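The call stack in the preceding entry shows the shutdown being driven by HBaseTestingUtil.shutdownMiniCluster(), invoked from TestMasterFailoverBalancerPersistence.testMasterFailoverBalancerPersistence. A minimal sketch of that start/stop lifecycle, assuming the usual JUnit 4 conventions visible in the stack trace (only the class and method names come from the log; the rest is illustrative and not the actual test):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  // In-process test harness: starts ZooKeeper, HDFS and HBase inside the JVM.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpCluster() throws Exception {
    // Defaults to a single master and a single region server, matching the cluster in this log.
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownCluster() throws Exception {
    // Produces the "Shutting down minicluster" ... "Minicluster is down" sequence seen in the log.
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void lifecycle() {
    // Intentionally empty: the point of this sketch is the start/shutdown pattern.
    // The real test additionally exercises master failover and balancer persistence.
  }
}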
2024-12-05T13:56:38,269 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T13:56:38,269 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2047677256, stopped=false 2024-12-05T13:56:38,270 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=da6aa2204f50,44809,1733406988128 2024-12-05T13:56:38,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T13:56:38,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T13:56:38,275 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T13:56:38,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:38,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:38,275 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T13:56:38,276 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.master.TestMasterFailoverBalancerPersistence.testMasterFailoverBalancerPersistence(TestMasterFailoverBalancerPersistence.java:81) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T13:56:38,276 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:56:38,276 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T13:56:38,276 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T13:56:38,276 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'da6aa2204f50,46569,1733406988224' ***** 2024-12-05T13:56:38,276 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T13:56:38,276 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T13:56:38,277 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T13:56:38,277 INFO [RS:0;da6aa2204f50:46569 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T13:56:38,277 INFO [RS:0;da6aa2204f50:46569 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T13:56:38,277 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(959): stopping server da6aa2204f50,46569,1733406988224 2024-12-05T13:56:38,277 INFO [RS:0;da6aa2204f50:46569 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T13:56:38,277 INFO [RS:0;da6aa2204f50:46569 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;da6aa2204f50:46569. 
2024-12-05T13:56:38,277 DEBUG [RS:0;da6aa2204f50:46569 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T13:56:38,277 DEBUG [RS:0;da6aa2204f50:46569 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:56:38,277 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T13:56:38,277 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T13:56:38,278 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-05T13:56:38,278 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T13:56:38,278 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T13:56:38,278 DEBUG [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-05T13:56:38,279 DEBUG [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T13:56:38,279 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T13:56:38,279 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T13:56:38,279 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T13:56:38,279 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T13:56:38,279 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T13:56:38,279 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-05T13:56:38,296 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/1588230740/.tmp/ns/45add82d222d4635821e85e9f7a5701f is 43, key is default/ns:d/1733406992459/Put/seqid=0 2024-12-05T13:56:38,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741845_1022 (size=5153) 2024-12-05T13:56:38,311 INFO [regionserver/da6aa2204f50:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T13:56:38,479 DEBUG [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T13:56:38,680 DEBUG [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T13:56:38,703 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/1588230740/.tmp/ns/45add82d222d4635821e85e9f7a5701f 2024-12-05T13:56:38,720 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/1588230740/.tmp/ns/45add82d222d4635821e85e9f7a5701f as hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/1588230740/ns/45add82d222d4635821e85e9f7a5701f 2024-12-05T13:56:38,728 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/1588230740/ns/45add82d222d4635821e85e9f7a5701f, entries=2, sequenceid=6, filesize=5.0 K 2024-12-05T13:56:38,730 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 451ms, sequenceid=6, compaction requested=false 2024-12-05T13:56:38,735 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T13:56:38,736 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T13:56:38,736 INFO [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T13:56:38,736 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733406998278Running coprocessor pre-close hooks at 1733406998279 (+1 ms)Disabling compacts and flushes for region at 1733406998279Disabling writes for close at 1733406998279Obtaining lock to block concurrent updates at 1733406998279Preparing flush snapshotting stores in 1588230740 at 1733406998279Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733406998279Flushing stores of hbase:meta,,1.1588230740 at 1733406998280 (+1 ms)Flushing 1588230740/ns: creating writer at 1733406998280Flushing 1588230740/ns: appending metadata at 1733406998295 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733406998295Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f528028: reopening flushed file at 1733406998719 (+424 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 451ms, sequenceid=6, compaction requested=false at 1733406998730 (+11 ms)Writing region close event to WAL at 1733406998730Running coprocessor post-close hooks at 1733406998736 (+6 ms)Closed at 1733406998736 2024-12-05T13:56:38,737 DEBUG [RS_CLOSE_META-regionserver/da6aa2204f50:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T13:56:38,880 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(976): stopping server da6aa2204f50,46569,1733406988224; all regions closed. 
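The close of hbase:meta above flushes the memstore to a temporary file under the region's .tmp directory, commits it into the ns column family directory, and finally records the max sequence id in recovered.edits/9.seqid. A hedged sketch that merely lists the resulting region directory with the Hadoop FileSystem API (the NameNode address and paths are copied from the log and are specific to this test run):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListMetaRegionDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address copied from the log; valid only for that particular test run.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44371"), conf);

    // hbase:meta region directory (encoded name 1588230740) as it appears in the log.
    Path regionDir = new Path(
        "/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/data/hbase/meta/1588230740");

    // After the close, the committed HFile lives under ns/ and the max-seqid marker
    // under recovered.edits/, while the .tmp/ staging directory should be empty again.
    for (FileStatus status : fs.listStatus(regionDir)) {
      System.out.println(status.getPath());
    }
  }
}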
2024-12-05T13:56:38,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741834_1010 (size=1152) 2024-12-05T13:56:38,897 DEBUG [RS:0;da6aa2204f50:46569 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/oldWALs 2024-12-05T13:56:38,897 INFO [RS:0;da6aa2204f50:46569 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL da6aa2204f50%2C46569%2C1733406988224.meta:.meta(num 1733406992303) 2024-12-05T13:56:38,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741832_1008 (size=93) 2024-12-05T13:56:38,903 DEBUG [RS:0;da6aa2204f50:46569 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/oldWALs 2024-12-05T13:56:38,903 INFO [RS:0;da6aa2204f50:46569 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL da6aa2204f50%2C46569%2C1733406988224:(num 1733406991258) 2024-12-05T13:56:38,903 DEBUG [RS:0;da6aa2204f50:46569 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T13:56:38,903 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T13:56:38,903 INFO [RS:0;da6aa2204f50:46569 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T13:56:38,904 INFO [RS:0;da6aa2204f50:46569 {}] hbase.ChoreService(370): Chore service for: regionserver/da6aa2204f50:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-05T13:56:38,904 INFO [RS:0;da6aa2204f50:46569 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T13:56:38,904 INFO [regionserver/da6aa2204f50:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T13:56:38,904 INFO [RS:0;da6aa2204f50:46569 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46569 2024-12-05T13:56:38,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T13:56:38,942 INFO [RS:0;da6aa2204f50:46569 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T13:56:38,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/da6aa2204f50,46569,1733406988224 2024-12-05T13:56:38,950 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [da6aa2204f50,46569,1733406988224] 2024-12-05T13:56:38,958 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/da6aa2204f50,46569,1733406988224 already deleted, retry=false 2024-12-05T13:56:38,959 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; da6aa2204f50,46569,1733406988224 expired; onlineServers=0 2024-12-05T13:56:38,959 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'da6aa2204f50,44809,1733406988128' ***** 2024-12-05T13:56:38,959 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T13:56:38,959 INFO [M:2;da6aa2204f50:44809 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T13:56:38,959 INFO [M:2;da6aa2204f50:44809 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T13:56:38,959 DEBUG [M:2;da6aa2204f50:44809 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T13:56:38,960 DEBUG [M:2;da6aa2204f50:44809 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T13:56:38,960 DEBUG [master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.small.0-1733406998240 {}] cleaner.HFileCleaner(306): Exit Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.small.0-1733406998240,5,FailOnTimeoutGroup] 2024-12-05T13:56:38,960 DEBUG [master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.large.0-1733406998240 {}] cleaner.HFileCleaner(306): Exit Thread[master/da6aa2204f50:0:becomeActiveMaster-HFileCleaner.large.0-1733406998240,5,FailOnTimeoutGroup] 2024-12-05T13:56:38,960 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-05T13:56:38,960 INFO [M:2;da6aa2204f50:44809 {}] hbase.ChoreService(370): Chore service for: master/da6aa2204f50:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T13:56:38,960 INFO [M:2;da6aa2204f50:44809 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T13:56:38,960 DEBUG [M:2;da6aa2204f50:44809 {}] master.HMaster(1795): Stopping service threads 2024-12-05T13:56:38,961 INFO [M:2;da6aa2204f50:44809 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T13:56:38,961 INFO [M:2;da6aa2204f50:44809 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T13:56:38,961 INFO [M:2;da6aa2204f50:44809 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T13:56:38,961 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-05T13:56:38,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T13:56:38,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T13:56:38,967 DEBUG [M:2;da6aa2204f50:44809 {}] zookeeper.ZKUtil(347): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T13:56:38,967 WARN [M:2;da6aa2204f50:44809 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T13:56:38,969 INFO [M:2;da6aa2204f50:44809 {}] master.ServerManager(1134): Rewriting .lastflushedseqids file at: hdfs://localhost:44371/user/jenkins/test-data/a4a6e13b-ee4f-bbc6-6380-377461bd543a/.lastflushedseqids 2024-12-05T13:56:38,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741846_1023 (size=99) 2024-12-05T13:56:39,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:56:39,050 INFO [RS:0;da6aa2204f50:46569 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T13:56:39,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46569-0x101a71134240003, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:56:39,051 INFO [RS:0;da6aa2204f50:46569 {}] regionserver.HRegionServer(1031): Exiting; stopping=da6aa2204f50,46569,1733406988224; zookeeper connection closed. 
2024-12-05T13:56:39,051 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6aa46ee5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6aa46ee5 2024-12-05T13:56:39,052 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-05T13:56:39,380 INFO [M:2;da6aa2204f50:44809 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T13:56:39,381 INFO [M:2;da6aa2204f50:44809 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T13:56:39,381 DEBUG [M:2;da6aa2204f50:44809 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T13:56:39,382 INFO [M:2;da6aa2204f50:44809 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:56:39,382 DEBUG [M:2;da6aa2204f50:44809 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:56:39,382 DEBUG [M:2;da6aa2204f50:44809 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T13:56:39,382 DEBUG [M:2;da6aa2204f50:44809 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:56:39,386 INFO [M:2;da6aa2204f50:44809 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T13:56:39,386 DEBUG [M:2;da6aa2204f50:44809 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733406999381Disabling compacts and flushes for region at 1733406999381Disabling writes for close at 1733406999382 (+1 ms)Writing region close event to WAL at 1733406999386 (+4 ms)Closed at 1733406999386 2024-12-05T13:56:39,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43231 is added to blk_1073741844_1021 (size=93) 2024-12-05T13:56:39,393 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T13:56:39,393 INFO [M:2;da6aa2204f50:44809 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-05T13:56:39,393 INFO [M:2;da6aa2204f50:44809 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44809 2024-12-05T13:56:39,393 INFO [M:2;da6aa2204f50:44809 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T13:56:39,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:56:39,534 INFO [M:2;da6aa2204f50:44809 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T13:56:39,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44809-0x101a71134240002, quorum=127.0.0.1:62287, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T13:56:39,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7182828b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-05T13:56:39,578 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11857d05{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T13:56:39,579 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T13:56:39,579 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e882389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T13:56:39,580 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f4c4215{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/hadoop.log.dir/,STOPPED} 2024-12-05T13:56:39,584 WARN [BP-1787308914-172.17.0.2-1733406984368 heartbeating to localhost/127.0.0.1:44371 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-05T13:56:39,584 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-05T13:56:39,584 WARN [BP-1787308914-172.17.0.2-1733406984368 heartbeating to localhost/127.0.0.1:44371 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1787308914-172.17.0.2-1733406984368 (Datanode Uuid 6fec998d-ad39-446b-92bd-2efdcf2be506) service to localhost/127.0.0.1:44371 2024-12-05T13:56:39,584 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-05T13:56:39,585 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/cluster_8553c555-5bc8-a989-5e4c-cbd56a079f87/data/data1/current/BP-1787308914-172.17.0.2-1733406984368 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T13:56:39,585 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/cluster_8553c555-5bc8-a989-5e4c-cbd56a079f87/data/data2/current/BP-1787308914-172.17.0.2-1733406984368 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-05T13:56:39,585 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-05T13:56:39,592 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26c59a36{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-05T13:56:39,593 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40c01bb1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T13:56:39,593 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T13:56:39,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61fd4728{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-05T13:56:39,593 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d13f332{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2aaa6615-3e06-822a-2602-8bbea1b11ccb/hadoop.log.dir/,STOPPED} 2024-12-05T13:56:39,599 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-05T13:56:39,615 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-05T13:56:39,622 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: master.TestMasterFailoverBalancerPersistence#testMasterFailoverBalancerPersistence Thread=70 (was 11) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (301154381) connection to localhost/127.0.0.1:44371 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/da6aa2204f50:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: HMaster-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44371 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@22af7935 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: process reaper java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44371 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: region-location-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:44371
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44371
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HBase-Metrics2-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: Time-limited test.named-queue-events-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ForkJoinPool-2-worker-2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: Timer for 'HBase' metrics system
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)

Potentially hanging thread: HMaster-EventLoopGroup-4-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: ForkJoinPool-2-worker-4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)

Potentially hanging thread: process reaper
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-3-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RpcClient-timer-pool-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-3-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: SnapshotHandlerChoreCleaner
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: SSL Certificates Store Monitor
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)

Potentially hanging thread: nioEventLoopGroup-2-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: master/da6aa2204f50:0:becomeActiveMaster-MemStoreChunkPool Statistics
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: RPCClient-NioEventLoopGroup-6-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:44371
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)

Potentially hanging thread: IPC Client (301154381) connection to localhost/127.0.0.1:44371 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: IPC Client (301154381) connection to localhost/127.0.0.1:44371 from jenkins.hfs.0
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: master/da6aa2204f50:0:becomeActiveMaster-MemStoreChunkPool Statistics
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

 - Thread LEAK? -, OpenFileDescriptor=385 (was 285) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=95 (was 96), ProcessCount=11 (was 11), AvailableMemoryMB=8443 (was 8990)