2024-12-08 21:05:39,677 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-12-08 21:05:39,692 main DEBUG Took 0.013225 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-08 21:05:39,692 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-08 21:05:39,693 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-08 21:05:39,701 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-08 21:05:39,702 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 21:05:39,710 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-08 21:05:39,726 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,728 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 21:05:39,729 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,729 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 21:05:39,730 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,730 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 21:05:39,731 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,732 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 21:05:39,732 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,733 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 21:05:39,734 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,734 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 21:05:39,735 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,735 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-08 21:05:39,736 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,736 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 21:05:39,737 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,737 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 21:05:39,738 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,738 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 21:05:39,739 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,739 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 21:05:39,740 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,740 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 21:05:39,740 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,741 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-08 21:05:39,743 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 21:05:39,744 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-08 21:05:39,747 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-08 21:05:39,748 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-08 21:05:39,749 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-08 21:05:39,750 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-08 21:05:39,761 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-08 21:05:39,765 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-08 21:05:39,768 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-08 21:05:39,768 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-08 21:05:39,769 main DEBUG createAppenders(={Console}) 2024-12-08 21:05:39,770 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a initialized 2024-12-08 21:05:39,770 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-12-08 21:05:39,771 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a OK. 2024-12-08 21:05:39,772 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-08 21:05:39,772 main DEBUG OutputStream closed 2024-12-08 21:05:39,773 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-08 21:05:39,773 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-08 21:05:39,773 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@2c35e847 OK 2024-12-08 21:05:39,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-08 21:05:39,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-08 21:05:39,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-08 21:05:39,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-08 21:05:39,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-08 21:05:39,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-08 21:05:39,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-08 21:05:39,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-08 21:05:39,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-08 21:05:39,874 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-08 21:05:39,874 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-08 21:05:39,874 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-08 21:05:39,875 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-08 21:05:39,875 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-08 21:05:39,875 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-08 21:05:39,876 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-08 21:05:39,876 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-08 21:05:39,877 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-08 21:05:39,879 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-08 21:05:39,880 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@42b02722) with optional ClassLoader: null 2024-12-08 21:05:39,880 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-08 21:05:39,881 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@42b02722] started OK. 2024-12-08T21:05:39,892 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.master.TestMasterFailoverBalancerPersistence timeout: 13 mins 2024-12-08 21:05:39,894 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-08 21:05:39,895 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-08T21:05:39,933 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: master.TestMasterFailoverBalancerPersistence#testMasterFailoverBalancerPersistence Thread=11, OpenFileDescriptor=285, MaxFileDescriptor=1048576, SystemLoadAverage=171, ProcessCount=11, AvailableMemoryMB=16731 2024-12-08T21:05:40,203 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658 2024-12-08T21:05:40,204 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=3, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T21:05:40,224 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/cluster_9acabe47-1c98-0432-8392-c1b14ea2a8ac, deleteOnExit=true 2024-12-08T21:05:40,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-08T21:05:40,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/test.cache.data in system properties and HBase conf 2024-12-08T21:05:40,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T21:05:40,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/hadoop.log.dir in system properties and HBase conf 2024-12-08T21:05:40,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T21:05:40,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T21:05:40,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-08T21:05:40,327 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-08T21:05:40,440 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T21:05:40,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T21:05:40,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T21:05:40,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T21:05:40,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T21:05:40,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T21:05:40,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T21:05:40,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T21:05:40,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T21:05:40,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T21:05:40,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/nfs.dump.dir in system properties and HBase conf 2024-12-08T21:05:40,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/java.io.tmpdir in system properties and HBase conf 2024-12-08T21:05:40,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T21:05:40,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T21:05:40,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T21:05:41,430 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-08T21:05:41,503 INFO [Time-limited test {}] log.Log(170): Logging initialized @2469ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-08T21:05:41,587 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T21:05:41,666 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T21:05:41,694 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T21:05:41,694 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T21:05:41,696 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T21:05:41,711 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T21:05:41,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d13f332{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/hadoop.log.dir/,AVAILABLE} 2024-12-08T21:05:41,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61fd4728{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T21:05:41,914 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26c59a36{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/java.io.tmpdir/jetty-localhost-40611-hadoop-hdfs-3_4_1-tests_jar-_-any-6678620953743503617/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T21:05:41,920 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40c01bb1{HTTP/1.1, (http/1.1)}{localhost:40611} 2024-12-08T21:05:41,921 INFO [Time-limited test {}] server.Server(415): Started @2888ms 2024-12-08T21:05:42,405 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T21:05:42,411 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T21:05:42,412 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T21:05:42,413 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T21:05:42,413 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T21:05:42,414 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f4c4215{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/hadoop.log.dir/,AVAILABLE} 2024-12-08T21:05:42,414 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e882389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T21:05:42,514 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7182828b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/java.io.tmpdir/jetty-localhost-42017-hadoop-hdfs-3_4_1-tests_jar-_-any-770862877553170230/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T21:05:42,515 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@11857d05{HTTP/1.1, (http/1.1)}{localhost:42017} 2024-12-08T21:05:42,515 INFO [Time-limited test {}] server.Server(415): Started @3483ms 2024-12-08T21:05:42,562 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T21:05:43,435 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/cluster_9acabe47-1c98-0432-8392-c1b14ea2a8ac/data/data1/current/BP-814579981-172.17.0.2-1733691940979/current, will proceed with Du for space computation calculation, 2024-12-08T21:05:43,435 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/cluster_9acabe47-1c98-0432-8392-c1b14ea2a8ac/data/data2/current/BP-814579981-172.17.0.2-1733691940979/current, will proceed with Du for space computation calculation, 2024-12-08T21:05:43,462 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T21:05:43,503 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15dbf5970cae29b with lease ID 0xb34b4d0808259f89: Processing first storage report for DS-5537ffa1-b6e0-415b-b039-256d9ea9c201 from datanode DatanodeRegistration(127.0.0.1:37947, datanodeUuid=e6fa4c1f-8ff3-45f3-a7e5-f8e254ade171, infoPort=41417, infoSecurePort=0, ipcPort=35563, storageInfo=lv=-57;cid=testClusterID;nsid=2052328889;c=1733691940979) 2024-12-08T21:05:43,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15dbf5970cae29b with lease ID 0xb34b4d0808259f89: from storage DS-5537ffa1-b6e0-415b-b039-256d9ea9c201 node DatanodeRegistration(127.0.0.1:37947, datanodeUuid=e6fa4c1f-8ff3-45f3-a7e5-f8e254ade171, infoPort=41417, infoSecurePort=0, ipcPort=35563, storageInfo=lv=-57;cid=testClusterID;nsid=2052328889;c=1733691940979), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T21:05:43,505 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15dbf5970cae29b with lease ID 0xb34b4d0808259f89: Processing first storage report for DS-210aa0bf-8990-489f-bc71-73b0876d9bfc from datanode DatanodeRegistration(127.0.0.1:37947, datanodeUuid=e6fa4c1f-8ff3-45f3-a7e5-f8e254ade171, infoPort=41417, infoSecurePort=0, ipcPort=35563, storageInfo=lv=-57;cid=testClusterID;nsid=2052328889;c=1733691940979) 2024-12-08T21:05:43,505 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15dbf5970cae29b with lease ID 0xb34b4d0808259f89: from storage DS-210aa0bf-8990-489f-bc71-73b0876d9bfc node DatanodeRegistration(127.0.0.1:37947, datanodeUuid=e6fa4c1f-8ff3-45f3-a7e5-f8e254ade171, infoPort=41417, infoSecurePort=0, ipcPort=35563, storageInfo=lv=-57;cid=testClusterID;nsid=2052328889;c=1733691940979), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-08T21:05:43,560 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658 2024-12-08T21:05:43,642 INFO 
[Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/cluster_9acabe47-1c98-0432-8392-c1b14ea2a8ac/zookeeper_0, clientPort=50945, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/cluster_9acabe47-1c98-0432-8392-c1b14ea2a8ac/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/cluster_9acabe47-1c98-0432-8392-c1b14ea2a8ac/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T21:05:43,654 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50945 2024-12-08T21:05:43,668 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:43,671 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:43,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741825_1001 (size=7) 2024-12-08T21:05:44,306 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0 with version=8 2024-12-08T21:05:44,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/hbase-staging 2024-12-08T21:05:44,381 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-08T21:05:44,608 INFO [Time-limited test {}] client.ConnectionUtils(128): master/229bab1f9d30:0 server-side Connection retries=45 2024-12-08T21:05:44,620 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T21:05:44,620 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T21:05:44,626 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T21:05:44,627 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T21:05:44,627 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T21:05:44,743 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting 
hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T21:05:44,801 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-08T21:05:44,809 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-08T21:05:44,813 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T21:05:44,842 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 118372 (auto-detected) 2024-12-08T21:05:44,843 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-08T21:05:44,860 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44649 2024-12-08T21:05:44,884 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44649 connecting to ZooKeeper ensemble=127.0.0.1:50945 2024-12-08T21:05:44,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:446490x0, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T21:05:44,995 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44649-0x100074d695d0000 connected 2024-12-08T21:05:45,109 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:45,111 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:45,120 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T21:05:45,123 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0, hbase.cluster.distributed=false 2024-12-08T21:05:45,144 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T21:05:45,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44649 2024-12-08T21:05:45,151 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44649 2024-12-08T21:05:45,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44649 2024-12-08T21:05:45,154 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44649 2024-12-08T21:05:45,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44649 2024-12-08T21:05:45,160 INFO [Time-limited test {}] client.ConnectionUtils(128): master/229bab1f9d30:0 server-side Connection retries=45 
2024-12-08T21:05:45,161 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T21:05:45,161 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T21:05:45,161 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T21:05:45,162 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T21:05:45,162 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T21:05:45,162 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T21:05:45,163 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T21:05:45,164 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37411 2024-12-08T21:05:45,166 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37411 connecting to ZooKeeper ensemble=127.0.0.1:50945 2024-12-08T21:05:45,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:374110x0, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T21:05:45,183 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37411-0x100074d695d0001 connected 2024-12-08T21:05:45,198 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:45,202 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:45,207 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T21:05:45,208 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0, hbase.cluster.distributed=false 2024-12-08T21:05:45,212 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T21:05:45,213 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37411 2024-12-08T21:05:45,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37411 
2024-12-08T21:05:45,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37411 2024-12-08T21:05:45,217 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37411 2024-12-08T21:05:45,218 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37411 2024-12-08T21:05:45,221 INFO [Time-limited test {}] client.ConnectionUtils(128): master/229bab1f9d30:0 server-side Connection retries=45 2024-12-08T21:05:45,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T21:05:45,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T21:05:45,222 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T21:05:45,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T21:05:45,223 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T21:05:45,223 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-08T21:05:45,223 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T21:05:45,225 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42549 2024-12-08T21:05:45,226 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42549 connecting to ZooKeeper ensemble=127.0.0.1:50945 2024-12-08T21:05:45,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:425490x0, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T21:05:45,236 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42549-0x100074d695d0002 connected 2024-12-08T21:05:45,246 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:45,249 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:45,253 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T21:05:45,254 INFO [Time-limited test {}] master.HMaster(525): 
hbase.rootdir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0, hbase.cluster.distributed=false 2024-12-08T21:05:45,256 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T21:05:45,257 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42549 2024-12-08T21:05:45,257 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42549 2024-12-08T21:05:45,258 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42549 2024-12-08T21:05:45,258 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42549 2024-12-08T21:05:45,259 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42549 2024-12-08T21:05:45,351 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/229bab1f9d30:0 server-side Connection retries=45 2024-12-08T21:05:45,352 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T21:05:45,352 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T21:05:45,353 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T21:05:45,353 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T21:05:45,353 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T21:05:45,355 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T21:05:45,357 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T21:05:45,358 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39161 2024-12-08T21:05:45,360 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39161 connecting to ZooKeeper ensemble=127.0.0.1:50945 2024-12-08T21:05:45,361 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:45,363 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:45,391 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391610x0, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T21:05:45,391 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:391610x0, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T21:05:45,392 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39161-0x100074d695d0003 connected 2024-12-08T21:05:45,395 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T21:05:45,402 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T21:05:45,405 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T21:05:45,412 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T21:05:45,413 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39161 2024-12-08T21:05:45,413 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39161 2024-12-08T21:05:45,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39161 2024-12-08T21:05:45,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39161 2024-12-08T21:05:45,414 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39161 2024-12-08T21:05:45,428 DEBUG [M:0;229bab1f9d30:44649 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;229bab1f9d30:44649 2024-12-08T21:05:45,429 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/229bab1f9d30,44649,1733691944456 2024-12-08T21:05:45,430 DEBUG [M:1;229bab1f9d30:37411 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:1;229bab1f9d30:37411 2024-12-08T21:05:45,431 DEBUG [M:2;229bab1f9d30:42549 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:2;229bab1f9d30:42549 2024-12-08T21:05:45,431 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/229bab1f9d30,37411,1733691945160 2024-12-08T21:05:45,431 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/229bab1f9d30,42549,1733691945220 2024-12-08T21:05:45,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:45,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:45,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:45,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:45,474 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/229bab1f9d30,42549,1733691945220 2024-12-08T21:05:45,475 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/229bab1f9d30,37411,1733691945160 2024-12-08T21:05:45,475 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/229bab1f9d30,44649,1733691944456 2024-12-08T21:05:45,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:45,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:45,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:45,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:45,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:45,507 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T21:05:45,507 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T21:05:45,508 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T21:05:45,508 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/229bab1f9d30,42549,1733691945220 from backup master directory 2024-12-08T21:05:45,509 INFO 
[master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(296): Another master is the active master, 229bab1f9d30,42549,1733691945220; waiting to become the next active master 2024-12-08T21:05:45,509 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(296): Another master is the active master, 229bab1f9d30,42549,1733691945220; waiting to become the next active master 2024-12-08T21:05:45,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:45,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:45,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:45,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/229bab1f9d30,42549,1733691945220 2024-12-08T21:05:45,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:45,517 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T21:05:45,518 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=229bab1f9d30,42549,1733691945220 2024-12-08T21:05:45,521 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-08T21:05:45,523 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-08T21:05:45,585 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/hbase.id] with ID: 74dc97a9-31f9-4643-91c3-540913d08c6c 2024-12-08T21:05:45,585 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/.tmp/hbase.id 2024-12-08T21:05:45,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741826_1002 (size=42) 2024-12-08T21:05:45,997 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/.tmp/hbase.id]:[hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/hbase.id] 2024-12-08T21:05:46,039 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:46,043 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T21:05:46,061 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 
2024-12-08T21:05:46,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:46,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:46,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:46,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:46,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741827_1003 (size=196) 2024-12-08T21:05:46,150 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T21:05:46,153 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T21:05:46,171 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
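Editor's note: that stack trace is a DEBUG-level probe, not a failure; the SASL helper reflectively looks for DFSClient.decryptEncryptedDataEncryptionKey and falls back when the running Hadoop already carries HDFS-12396. The snippet below sketches that optional-method probe pattern in isolation; it is an illustration, not the hbase-asyncfs implementation.

    import java.lang.reflect.Method;

    // Sketch: probe for an optional method and fall back quietly when it is absent.
    public class OptionalMethodProbe {
      public static void main(String[] args) {
        Method m;
        try {
          m = org.apache.hadoop.hdfs.DFSClient.class.getDeclaredMethod(
              "decryptEncryptedDataEncryptionKey", org.apache.hadoop.fs.FileEncryptionInfo.class);
        } catch (NoSuchMethodException e) {
          m = null; // expected on Hadoop versions that include HDFS-12396
        }
        System.out.println(m == null ? "method absent, use fallback path" : "method present");
      }
    }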
2024-12-08T21:05:46,175 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T21:05:46,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741828_1004 (size=1189) 2024-12-08T21:05:46,624 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store 2024-12-08T21:05:46,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741829_1005 (size=34) 2024-12-08T21:05:47,052 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-08T21:05:47,057 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T21:05:47,059 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T21:05:47,059 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:05:47,059 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T21:05:47,061 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T21:05:47,061 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:05:47,062 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:05:47,063 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733691947059Disabling compacts and flushes for region at 1733691947059Disabling writes for close at 1733691947061 (+2 ms)Writing region close event to WAL at 1733691947062 (+1 ms)Closed at 1733691947062 2024-12-08T21:05:47,065 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/.initializing 2024-12-08T21:05:47,065 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220 2024-12-08T21:05:47,073 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T21:05:47,086 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C42549%2C1733691945220, suffix=, logDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220, archiveDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/oldWALs, maxLogs=10 2024-12-08T21:05:47,105 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220/229bab1f9d30%2C42549%2C1733691945220.1733691947090, exclude list is [], retry=0 2024-12-08T21:05:47,119 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37947,DS-5537ffa1-b6e0-415b-b039-256d9ea9c201,DISK] 2024-12-08T21:05:47,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
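Editor's note: the StoreHotnessProtector message logged just above (while master:store was instantiated) says the feature is off and that setting hbase.region.store.parallel.put.limit above zero enables it. A minimal sketch of setting that property programmatically follows; the value 10 is purely illustrative, not a recommendation taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: enable StoreHotnessProtector by raising the parallel-put limit above zero.
    public class EnableHotnessProtector {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.region.store.parallel.put.limit", 10); // illustrative value
        System.out.println("limit=" + conf.getInt("hbase.region.store.parallel.put.limit", 0));
      }
    }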
2024-12-08T21:05:47,151 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220/229bab1f9d30%2C42549%2C1733691945220.1733691947090 2024-12-08T21:05:47,152 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41417:41417)] 2024-12-08T21:05:47,152 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T21:05:47,152 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T21:05:47,155 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:47,156 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:47,187 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:47,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T21:05:47,212 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:47,214 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T21:05:47,215 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:47,218 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T21:05:47,218 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:47,219 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T21:05:47,219 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:47,222 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T21:05:47,222 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:47,223 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T21:05:47,223 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:47,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T21:05:47,226 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:47,227 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T21:05:47,227 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:47,230 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:47,231 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:47,236 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:47,237 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:47,241 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
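Editor's note: the FlushLargeStoresPolicy line above derives its per-family lower bound as the region memstore flush size divided by the number of column families. With the values visible in this log (flushSize=134217728 and four families: info, proc, rs, state) that works out to the 32.0 M it prints, and to the flushSizeLowerBound=33554432 reported a few lines later. The snippet below only reproduces that arithmetic.

    // Reproduce the per-family flush lower bound using numbers taken from this log.
    public class PerFamilyFlushBound {
      public static void main(String[] args) {
        long memStoreFlushSize = 134_217_728L; // 128 MB region flush size, from the log
        int columnFamilies = 4;                // info, proc, rs, state
        long lowerBound = memStoreFlushSize / columnFamilies;
        System.out.println(lowerBound + " bytes = " + (lowerBound >> 20) + " MB"); // 33554432 = 32 MB
      }
    }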
2024-12-08T21:05:47,245 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:47,249 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T21:05:47,250 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61389859, jitterRate=-0.08521981537342072}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T21:05:47,256 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733691947167Initializing all the Stores at 1733691947169 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733691947169Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733691947170 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733691947170Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733691947170Cleaning up temporary data from old regions at 1733691947237 (+67 ms)Region opened successfully at 1733691947255 (+18 ms) 2024-12-08T21:05:47,257 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T21:05:47,287 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1907643c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=229bab1f9d30/172.17.0.2:0 2024-12-08T21:05:47,317 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
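Editor's note: the open journal above repeats the master:store column-family settings in full. As a point of reference, the sketch below shows how the 'info' family's settings (VERSIONS=3, IN_MEMORY=true, 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL bloom filter) would be expressed through the public descriptor builders; this is an illustration, not how MasterRegionFactory actually constructs the descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: declare an 'info' family with the same settings the journal prints.
    public class MasterStoreInfoFamily {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8192)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            .build();
        System.out.println(td);
      }
    }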
2024-12-08T21:05:47,330 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T21:05:47,330 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T21:05:47,333 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T21:05:47,335 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-08T21:05:47,341 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-08T21:05:47,341 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T21:05:47,369 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T21:05:47,377 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T21:05:47,431 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T21:05:47,435 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T21:05:47,437 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T21:05:47,448 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T21:05:47,450 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T21:05:47,454 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T21:05:47,465 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T21:05:47,466 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T21:05:47,473 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T21:05:47,494 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T21:05:47,498 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T21:05:47,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T21:05:47,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T21:05:47,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T21:05:47,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T21:05:47,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:47,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:47,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:47,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:47,510 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=229bab1f9d30,42549,1733691945220, sessionid=0x100074d695d0002, setting cluster-up flag (Was=false) 2024-12-08T21:05:47,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:47,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:47,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:47,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:47,556 DEBUG 
[master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T21:05:47,558 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=229bab1f9d30,42549,1733691945220 2024-12-08T21:05:47,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:47,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:47,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:47,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:47,599 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T21:05:47,603 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=229bab1f9d30,42549,1733691945220 2024-12-08T21:05:47,612 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-08T21:05:47,625 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(746): ClusterId : 74dc97a9-31f9-4643-91c3-540913d08c6c 2024-12-08T21:05:47,627 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T21:05:47,633 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T21:05:47,633 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T21:05:47,641 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T21:05:47,642 DEBUG [RS:0;229bab1f9d30:39161 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37a497fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=229bab1f9d30/172.17.0.2:0 2024-12-08T21:05:47,659 DEBUG [RS:0;229bab1f9d30:39161 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;229bab1f9d30:39161 2024-12-08T21:05:47,663 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-08T21:05:47,664 INFO [RS:0;229bab1f9d30:39161 {}] 
regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-08T21:05:47,664 DEBUG [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-08T21:05:47,666 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(2659): reportForDuty to master=229bab1f9d30,42549,1733691945220 with port=39161, startcode=1733691945319 2024-12-08T21:05:47,678 DEBUG [RS:0;229bab1f9d30:39161 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T21:05:47,681 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T21:05:47,692 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T21:05:47,699 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-08T21:05:47,705 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 229bab1f9d30,42549,1733691945220 Number of backup masters: 2 229bab1f9d30,37411,1733691945160 229bab1f9d30,44649,1733691944456 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T21:05:47,714 INFO [HMaster-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42977, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T21:05:47,715 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T21:05:47,715 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T21:05:47,715 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T21:05:47,715 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T21:05:47,715 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/229bab1f9d30:0, corePoolSize=10, maxPoolSize=10 2024-12-08T21:05:47,716 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/229bab1f9d30:0, corePoolSize=1, 
maxPoolSize=1 2024-12-08T21:05:47,716 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/229bab1f9d30:0, corePoolSize=2, maxPoolSize=2 2024-12-08T21:05:47,716 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:47,720 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42549 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:47,726 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733691977726 2024-12-08T21:05:47,726 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T21:05:47,726 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-08T21:05:47,727 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T21:05:47,728 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T21:05:47,732 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T21:05:47,732 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T21:05:47,733 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T21:05:47,733 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T21:05:47,733 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:47,733 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => 
'|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T21:05:47,733 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:47,737 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T21:05:47,739 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T21:05:47,739 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T21:05:47,742 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T21:05:47,743 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T21:05:47,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741831_1007 (size=1321) 2024-12-08T21:05:47,745 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733691947744,5,FailOnTimeoutGroup] 2024-12-08T21:05:47,745 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733691947745,5,FailOnTimeoutGroup] 2024-12-08T21:05:47,745 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:47,746 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T21:05:47,747 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:47,747 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:47,750 DEBUG [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-08T21:05:47,750 WARN [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-08T21:05:47,853 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(2659): reportForDuty to master=229bab1f9d30,42549,1733691945220 with port=39161, startcode=1733691945319 2024-12-08T21:05:47,856 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42549 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 229bab1f9d30,39161,1733691945319 2024-12-08T21:05:47,857 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42549 {}] master.ServerManager(517): Registering regionserver=229bab1f9d30,39161,1733691945319 2024-12-08T21:05:47,864 DEBUG [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0 2024-12-08T21:05:47,864 DEBUG [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44471 2024-12-08T21:05:47,864 DEBUG [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-08T21:05:47,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T21:05:47,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T21:05:47,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T21:05:47,914 DEBUG [RS:0;229bab1f9d30:39161 {}] zookeeper.ZKUtil(111): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/229bab1f9d30,39161,1733691945319 2024-12-08T21:05:47,915 WARN [RS:0;229bab1f9d30:39161 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T21:05:47,915 INFO [RS:0;229bab1f9d30:39161 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T21:05:47,915 DEBUG [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/WALs/229bab1f9d30,39161,1733691945319 2024-12-08T21:05:47,917 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [229bab1f9d30,39161,1733691945319] 2024-12-08T21:05:47,940 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T21:05:47,952 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T21:05:47,956 INFO [RS:0;229bab1f9d30:39161 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T21:05:47,957 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:47,957 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-08T21:05:47,962 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-08T21:05:47,963 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
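Editor's note: the MemStoreFlusher line above prints globalMemStoreLimit=880 M with a low-water mark of 836 M. Assuming the default lower-limit fraction of 0.95 (an assumption; the fraction itself is not printed in this log), the low mark is simply 95% of the global limit, which matches these numbers. The snippet below reproduces that arithmetic.

    // Reproduce the memstore low-water mark from the numbers above (0.95 is an assumed default).
    public class MemStoreLowMark {
      public static void main(String[] args) {
        double globalLimitMb = 880.0;     // from the log line above
        double lowerLimitFraction = 0.95; // assumed default fraction, not printed in this log
        System.out.println(globalLimitMb * lowerLimitFraction + " MB"); // 836.0 MB
      }
    }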
2024-12-08T21:05:47,963 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:47,964 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:47,964 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:47,964 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:47,964 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:47,964 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/229bab1f9d30:0, corePoolSize=2, maxPoolSize=2 2024-12-08T21:05:47,965 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:47,965 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:47,965 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:47,965 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:47,965 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:47,965 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:47,965 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3 2024-12-08T21:05:47,966 DEBUG [RS:0;229bab1f9d30:39161 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/229bab1f9d30:0, corePoolSize=3, maxPoolSize=3 2024-12-08T21:05:47,966 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:47,967 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:47,967 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:47,967 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
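Editor's note: the executor.ExecutorService lines above start a series of bounded worker pools, each described only by corePoolSize and maxPoolSize. For readers unfamiliar with that shape, the sketch below shows an equivalent bounded pool in plain java.util.concurrent; HBase's own ExecutorService wrapper adds naming and metrics that are not reproduced here.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Sketch: a corePoolSize=1/maxPoolSize=1 pool, like RS_OPEN_REGION above (illustration only).
    public class BoundedPool {
      public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true); // let idle core threads exit
        pool.execute(() -> System.out.println("open-region style task running"));
        pool.shutdown();
      }
    }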
2024-12-08T21:05:47,967 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:47,967 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,39161,1733691945319-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T21:05:47,981 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T21:05:47,983 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,39161,1733691945319-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:47,983 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:47,983 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.Replication(171): 229bab1f9d30,39161,1733691945319 started 2024-12-08T21:05:47,998 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:47,998 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(1482): Serving as 229bab1f9d30,39161,1733691945319, RpcServer on 229bab1f9d30/172.17.0.2:39161, sessionid=0x100074d695d0003 2024-12-08T21:05:47,999 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T21:05:47,999 DEBUG [RS:0;229bab1f9d30:39161 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 229bab1f9d30,39161,1733691945319 2024-12-08T21:05:47,999 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,39161,1733691945319' 2024-12-08T21:05:47,999 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T21:05:48,000 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T21:05:48,001 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T21:05:48,001 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T21:05:48,001 DEBUG [RS:0;229bab1f9d30:39161 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 229bab1f9d30,39161,1733691945319 2024-12-08T21:05:48,001 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '229bab1f9d30,39161,1733691945319' 2024-12-08T21:05:48,001 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T21:05:48,002 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T21:05:48,002 DEBUG [RS:0;229bab1f9d30:39161 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T21:05:48,002 INFO [RS:0;229bab1f9d30:39161 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T21:05:48,002 INFO [RS:0;229bab1f9d30:39161 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-08T21:05:48,111 INFO [RS:0;229bab1f9d30:39161 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T21:05:48,116 INFO [RS:0;229bab1f9d30:39161 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C39161%2C1733691945319, suffix=, logDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/WALs/229bab1f9d30,39161,1733691945319, archiveDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/oldWALs, maxLogs=32 2024-12-08T21:05:48,129 DEBUG [RS:0;229bab1f9d30:39161 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/WALs/229bab1f9d30,39161,1733691945319/229bab1f9d30%2C39161%2C1733691945319.1733691948117, exclude list is [], retry=0 2024-12-08T21:05:48,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37947,DS-5537ffa1-b6e0-415b-b039-256d9ea9c201,DISK] 2024-12-08T21:05:48,136 INFO [RS:0;229bab1f9d30:39161 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/WALs/229bab1f9d30,39161,1733691945319/229bab1f9d30%2C39161%2C1733691945319.1733691948117 2024-12-08T21:05:48,136 DEBUG [RS:0;229bab1f9d30:39161 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41417:41417)] 2024-12-08T21:05:48,145 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-08T21:05:48,146 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0 2024-12-08T21:05:48,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741833_1009 (size=32) 2024-12-08T21:05:48,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T21:05:48,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T21:05:48,562 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T21:05:48,562 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:48,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T21:05:48,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T21:05:48,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T21:05:48,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:48,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T21:05:48,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T21:05:48,570 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T21:05:48,570 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:48,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T21:05:48,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T21:05:48,574 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T21:05:48,575 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:48,576 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T21:05:48,576 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T21:05:48,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/data/hbase/meta/1588230740 
2024-12-08T21:05:48,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/data/hbase/meta/1588230740 2024-12-08T21:05:48,581 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T21:05:48,581 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T21:05:48,582 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T21:05:48,584 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T21:05:48,588 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T21:05:48,590 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69060712, jitterRate=0.02908480167388916}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T21:05:48,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733691948557Initializing all the Stores at 1733691948559 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733691948559Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733691948559Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733691948559Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733691948559Cleaning up temporary data from old regions at 1733691948581 (+22 ms)Region opened successfully at 1733691948591 (+10 ms) 2024-12-08T21:05:48,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T21:05:48,592 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T21:05:48,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): 
Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T21:05:48,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T21:05:48,592 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T21:05:48,593 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T21:05:48,593 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733691948592Disabling compacts and flushes for region at 1733691948592Disabling writes for close at 1733691948592Writing region close event to WAL at 1733691948593 (+1 ms)Closed at 1733691948593 2024-12-08T21:05:48,597 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T21:05:48,597 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-08T21:05:48,603 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T21:05:48,611 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T21:05:48,614 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T21:05:48,769 DEBUG [229bab1f9d30:42549 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T21:05:48,785 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=229bab1f9d30,39161,1733691945319 2024-12-08T21:05:48,790 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 229bab1f9d30,39161,1733691945319, state=OPENING 2024-12-08T21:05:48,814 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T21:05:48,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:48,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:48,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:48,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-08T21:05:48,824 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:48,824 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:48,825 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:48,825 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:48,827 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T21:05:48,830 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=229bab1f9d30,39161,1733691945319}] 2024-12-08T21:05:49,003 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-08T21:05:49,007 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40403, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T21:05:49,016 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-08T21:05:49,017 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T21:05:49,017 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-08T21:05:49,021 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C39161%2C1733691945319.meta, suffix=.meta, logDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/WALs/229bab1f9d30,39161,1733691945319, archiveDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/oldWALs, maxLogs=32 2024-12-08T21:05:49,036 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/WALs/229bab1f9d30,39161,1733691945319/229bab1f9d30%2C39161%2C1733691945319.meta.1733691949022.meta, exclude list is [], retry=0 2024-12-08T21:05:49,040 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37947,DS-5537ffa1-b6e0-415b-b039-256d9ea9c201,DISK] 2024-12-08T21:05:49,043 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/WALs/229bab1f9d30,39161,1733691945319/229bab1f9d30%2C39161%2C1733691945319.meta.1733691949022.meta 2024-12-08T21:05:49,043 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41417:41417)] 2024-12-08T21:05:49,044 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T21:05:49,045 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T21:05:49,048 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T21:05:49,052 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-08T21:05:49,056 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T21:05:49,056 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T21:05:49,057 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-08T21:05:49,057 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-08T21:05:49,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T21:05:49,061 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T21:05:49,062 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:49,062 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T21:05:49,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-08T21:05:49,064 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-08T21:05:49,065 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:49,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T21:05:49,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T21:05:49,067 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T21:05:49,068 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:49,068 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T21:05:49,069 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T21:05:49,070 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T21:05:49,070 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:49,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T21:05:49,072 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-08T21:05:49,073 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/data/hbase/meta/1588230740 2024-12-08T21:05:49,076 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/data/hbase/meta/1588230740 2024-12-08T21:05:49,078 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-08T21:05:49,078 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-08T21:05:49,079 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-08T21:05:49,083 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-08T21:05:49,085 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73915392, jitterRate=0.1014251708984375}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T21:05:49,085 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-08T21:05:49,086 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733691949057Writing region info on filesystem at 1733691949057Initializing all the Stores at 1733691949059 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733691949059Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733691949059Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733691949060 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733691949060Cleaning up temporary data from old regions at 1733691949079 (+19 ms)Running coprocessor post-open hooks at 1733691949085 (+6 ms)Region opened successfully at 1733691949086 (+1 ms) 2024-12-08T21:05:49,092 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733691948996 2024-12-08T21:05:49,104 DEBUG [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T21:05:49,104 INFO [RS_OPEN_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-08T21:05:49,106 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=229bab1f9d30,39161,1733691945319 2024-12-08T21:05:49,108 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 229bab1f9d30,39161,1733691945319, state=OPEN 2024-12-08T21:05:49,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T21:05:49,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T21:05:49,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T21:05:49,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T21:05:49,132 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:49,132 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:49,132 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:49,132 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:49,132 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=229bab1f9d30,39161,1733691945319 2024-12-08T21:05:49,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T21:05:49,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=229bab1f9d30,39161,1733691945319 in 303 msec 2024-12-08T21:05:49,143 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T21:05:49,143 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 536 msec 2024-12-08T21:05:49,144 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-08T21:05:49,144 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-08T21:05:49,161 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T21:05:49,162 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=229bab1f9d30,39161,1733691945319, seqNum=-1] 2024-12-08T21:05:49,181 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T21:05:49,184 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39569, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T21:05:49,208 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.5670 sec 2024-12-08T21:05:49,208 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733691949208, completionTime=-1 2024-12-08T21:05:49,211 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T21:05:49,211 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-08T21:05:49,255 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T21:05:49,255 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733692009255 2024-12-08T21:05:49,255 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733692069255 2024-12-08T21:05:49,255 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 44 msec 2024-12-08T21:05:49,258 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,42549,1733691945220-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:49,258 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,42549,1733691945220-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:49,258 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,42549,1733691945220-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:49,260 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-229bab1f9d30:42549, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:49,261 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:49,266 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-08T21:05:49,271 DEBUG [master/229bab1f9d30:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T21:05:49,299 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.780sec 2024-12-08T21:05:49,300 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T21:05:49,301 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T21:05:49,302 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T21:05:49,302 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T21:05:49,302 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T21:05:49,303 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,42549,1733691945220-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T21:05:49,303 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,42549,1733691945220-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T21:05:49,311 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T21:05:49,312 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T21:05:49,312 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,42549,1733691945220-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-08T21:05:49,335 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54c2d4b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T21:05:49,336 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-08T21:05:49,336 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-08T21:05:49,339 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 229bab1f9d30,37411,-1 for getting cluster id 2024-12-08T21:05:49,341 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-08T21:05:49,352 DEBUG [HMaster-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '74dc97a9-31f9-4643-91c3-540913d08c6c' 2024-12-08T21:05:49,355 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-08T21:05:49,355 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "74dc97a9-31f9-4643-91c3-540913d08c6c" 2024-12-08T21:05:49,356 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@993125d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T21:05:49,356 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [229bab1f9d30,42549,-1, 229bab1f9d30,37411,-1, 229bab1f9d30,44649,-1] 2024-12-08T21:05:49,357 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T21:05:49,359 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T21:05:49,360 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T21:05:49,361 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43770, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T21:05:49,362 INFO [HMaster-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46838, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T21:05:49,364 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13b392dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T21:05:49,365 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T21:05:49,367 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-08T21:05:49,371 INFO [HMaster-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39068, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-08T21:05:49,373 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=229bab1f9d30,39161,1733691945319, seqNum=-1] 2024-12-08T21:05:49,373 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T21:05:49,375 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52830, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T21:05:49,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=229bab1f9d30,42549,1733691945220 2024-12-08T21:05:49,392 INFO [Time-limited test {}] hbase.SingleProcessHBaseCluster(618): Stopping Thread[M:2;229bab1f9d30:42549,5,FailOnTimeoutGroup] 2024-12-08T21:05:49,393 INFO [Time-limited test {}] master.HMaster(3321): ***** STOPPING master '229bab1f9d30,42549,1733691945220' ***** 2024-12-08T21:05:49,393 INFO [Time-limited test {}] master.HMaster(3323): STOPPED: Stopping master 2 2024-12-08T21:05:49,393 INFO [M:2;229bab1f9d30:42549 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T21:05:49,394 INFO [M:2;229bab1f9d30:42549 {}] client.AsyncConnectionImpl(233): Connection has been closed by M:2;229bab1f9d30:42549. 2024-12-08T21:05:49,394 DEBUG [M:2;229bab1f9d30:42549 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:630) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T21:05:49,398 DEBUG [M:2;229bab1f9d30:42549 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T21:05:49,398 INFO [M:2;229bab1f9d30:42549 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T21:05:49,398 DEBUG [M:2;229bab1f9d30:42549 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T21:05:49,398 DEBUG [M:2;229bab1f9d30:42549 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T21:05:49,398 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T21:05:49,398 DEBUG [master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733691947744 {}] cleaner.HFileCleaner(306): Exit Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733691947744,5,FailOnTimeoutGroup] 2024-12-08T21:05:49,398 DEBUG [master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733691947745 {}] cleaner.HFileCleaner(306): Exit Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733691947745,5,FailOnTimeoutGroup] 2024-12-08T21:05:49,399 INFO [M:2;229bab1f9d30:42549 {}] hbase.ChoreService(370): Chore service for: master/229bab1f9d30:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T21:05:49,399 INFO [M:2;229bab1f9d30:42549 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T21:05:49,399 DEBUG [M:2;229bab1f9d30:42549 {}] master.HMaster(1795): Stopping service threads 2024-12-08T21:05:49,399 INFO [M:2;229bab1f9d30:42549 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T21:05:49,399 INFO [M:2;229bab1f9d30:42549 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T21:05:49,400 INFO [M:2;229bab1f9d30:42549 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T21:05:49,400 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T21:05:49,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:49,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:49,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:49,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:49,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:49,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:49,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:49,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:49,415 INFO [Time-limited test {}] hbase.LocalHBaseCluster(362): Waiting on 
229bab1f9d30,42549,1733691945220 2024-12-08T21:05:49,415 DEBUG [M:2;229bab1f9d30:42549 {}] zookeeper.ZKUtil(347): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T21:05:49,415 WARN [M:2;229bab1f9d30:42549 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T21:05:49,415 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T21:05:49,415 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T21:05:49,415 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T21:05:49,415 DEBUG [zk-event-processor-pool-0 {}] master.ActiveMasterManager(204): No master available. Notifying waiting threads 2024-12-08T21:05:49,415 DEBUG [zk-event-processor-pool-0 {}] master.ActiveMasterManager(204): No master available. Notifying waiting threads 2024-12-08T21:05:49,416 INFO [M:2;229bab1f9d30:42549 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/.lastflushedseqids 2024-12-08T21:05:49,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:49,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:49,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:49,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:49,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:49,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:49,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:49,424 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, 
baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T21:05:49,424 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T21:05:49,425 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T21:05:49,425 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T21:05:49,425 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/229bab1f9d30,37411,1733691945160 from backup master directory 2024-12-08T21:05:49,425 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(296): Another master is the active master, 229bab1f9d30,37411,1733691945160; waiting to become the next active master 2024-12-08T21:05:49,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741835_1011 (size=99) 2024-12-08T21:05:49,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:49,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/229bab1f9d30,37411,1733691945160 2024-12-08T21:05:49,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:49,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:49,431 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T21:05:49,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:49,431 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=229bab1f9d30,37411,1733691945160 2024-12-08T21:05:49,453 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:49,453 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 
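The block above captures ZooKeeper-based active-master election: the /hbase/master znode of the stopping master is deleted, every master and region server re-sets its watch, 229bab1f9d30,37411,1733691945160 recreates the znode, removes its own /hbase/backup-masters entry and registers as active, while another backup master logs that it will keep waiting. Below is a minimal, illustrative sketch of that ephemeral-znode election pattern using the plain ZooKeeper client; the class name, error handling and data payload are assumptions, and HBase's ActiveMasterManager additionally manages backup-master registration, cluster-status tracking and connection-loss retries that are omitted here.

```java
import java.nio.charset.StandardCharsets;
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

/**
 * Illustrative sketch only: ephemeral-znode leader election of the kind the
 * log entries above describe, not HBase's actual ActiveMasterManager.
 */
public class ActiveMasterElectionSketch implements Watcher {

  // Znode path as seen in the log; everything else here is illustrative.
  private static final String MASTER_ZNODE = "/hbase/master";

  private final ZooKeeper zk;
  private final byte[] serverName;
  private volatile CountDownLatch masterGone = new CountDownLatch(1);

  public ActiveMasterElectionSketch(String quorum, String serverName) throws Exception {
    // e.g. quorum = "127.0.0.1:50945" as in the log; 'this' receives watch events.
    this.zk = new ZooKeeper(quorum, 30_000, this);
    this.serverName = serverName.getBytes(StandardCharsets.UTF_8);
  }

  /** Blocks until this process wins the race for /hbase/master. */
  public void becomeActiveMaster() throws Exception {
    while (true) {
      try {
        // EPHEMERAL: the znode vanishes when our session ends, so the death of
        // the active master automatically lets a backup master take over.
        zk.create(MASTER_ZNODE, serverName, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        return; // registered as active master
      } catch (KeeperException.NodeExistsException e) {
        // Lost the race: watch the existing znode and wait for NodeDeleted.
        masterGone = new CountDownLatch(1);
        if (zk.exists(MASTER_ZNODE, this) == null) {
          continue; // it disappeared between create() and exists(); race again
        }
        masterGone.await();
      }
    }
  }

  @Override
  public void process(WatchedEvent event) {
    if (event.getType() == Event.EventType.NodeDeleted && MASTER_ZNODE.equals(event.getPath())) {
      masterGone.countDown();
    }
  }
}
```

Because the winner's znode is ephemeral, a takeover needs no explicit handoff on a crash: the old master's session ending is what removes /hbase/master and wakes every watcher, which is the NodeDeleted/NodeCreated sequence visible in the watcher events above.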
2024-12-08T21:05:49,465 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=1) cost 12ms. 2024-12-08T21:05:49,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741836_1012 (size=196) 2024-12-08T21:05:49,831 INFO [M:2;229bab1f9d30:42549 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T21:05:49,832 INFO [M:2;229bab1f9d30:42549 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T21:05:49,832 DEBUG [M:2;229bab1f9d30:42549 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T21:05:49,832 INFO [M:2;229bab1f9d30:42549 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:05:49,832 DEBUG [M:2;229bab1f9d30:42549 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:05:49,833 DEBUG [M:2;229bab1f9d30:42549 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T21:05:49,833 DEBUG [M:2;229bab1f9d30:42549 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:05:49,840 INFO [M:2;229bab1f9d30:42549 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.62 KB heapSize=11.22 KB 2024-12-08T21:05:49,882 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T21:05:49,884 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T21:05:49,884 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class 
org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T21:05:49,896 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(342): old store file tracker DEFAULT is the same with new store file tracker, skip migration 2024-12-08T21:05:49,901 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(316): Renamed hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220 to hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220-dead as it is dead 2024-12-08T21:05:49,903 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-08T21:05:49,903 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-08T21:05:49,903 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220-dead/229bab1f9d30%2C42549%2C1733691945220.1733691947090 2024-12-08T21:05:49,906 WARN [IPC Server handler 0 on default port 44471 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220-dead/229bab1f9d30%2C42549%2C1733691945220.1733691947090 has not been closed. Lease recovery is in progress. RecoveryId = 1013 for block blk_1073741830_1006 2024-12-08T21:05:49,909 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220-dead/229bab1f9d30%2C42549%2C1733691945220.1733691947090 after 5ms 2024-12-08T21:05:49,910 DEBUG [M:2;229bab1f9d30:42549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/32100cae537f4c00b1bd5358796dddda is 82, key is hbase:meta,,1/info:regioninfo/1733691949105/Put/seqid=0 2024-12-08T21:05:49,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741837_1014 (size=5672) 2024-12-08T21:05:50,321 INFO [M:2;229bab1f9d30:42549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/32100cae537f4c00b1bd5358796dddda 2024-12-08T21:05:50,381 DEBUG [M:2;229bab1f9d30:42549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/31fe8ea044344befbdb43d57660eda9e is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733691949207/Put/seqid=0 2024-12-08T21:05:50,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741838_1015 (size=5275) 2024-12-08T21:05:50,387 INFO [M:2;229bab1f9d30:42549 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/31fe8ea044344befbdb43d57660eda9e 2024-12-08T21:05:50,415 DEBUG [M:2;229bab1f9d30:42549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1968259c3295412fb5306c046334270d is 69, key is 229bab1f9d30,39161,1733691945319/rs:state/1733691947859/Put/seqid=0 2024-12-08T21:05:50,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741839_1016 (size=5156) 2024-12-08T21:05:50,822 INFO [M:2;229bab1f9d30:42549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1968259c3295412fb5306c046334270d 2024-12-08T21:05:50,833 DEBUG [M:2;229bab1f9d30:42549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/32100cae537f4c00b1bd5358796dddda as hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/32100cae537f4c00b1bd5358796dddda 2024-12-08T21:05:50,841 INFO [M:2;229bab1f9d30:42549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/32100cae537f4c00b1bd5358796dddda, entries=8, sequenceid=28, filesize=5.5 K 2024-12-08T21:05:50,845 DEBUG [M:2;229bab1f9d30:42549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/31fe8ea044344befbdb43d57660eda9e as hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/31fe8ea044344befbdb43d57660eda9e 2024-12-08T21:05:50,854 INFO [M:2;229bab1f9d30:42549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/31fe8ea044344befbdb43d57660eda9e, entries=3, sequenceid=28, filesize=5.2 K 2024-12-08T21:05:50,856 DEBUG [M:2;229bab1f9d30:42549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1968259c3295412fb5306c046334270d as hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1968259c3295412fb5306c046334270d 2024-12-08T21:05:50,864 INFO [M:2;229bab1f9d30:42549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1968259c3295412fb5306c046334270d, entries=1, sequenceid=28, filesize=5.0 K 2024-12-08T21:05:50,870 INFO [M:2;229bab1f9d30:42549 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.62 KB/7802, heapSize ~10.92 KB/11184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1033ms, sequenceid=28, compaction requested=false 2024-12-08T21:05:50,872 INFO [M:2;229bab1f9d30:42549 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:05:50,872 DEBUG [M:2;229bab1f9d30:42549 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733691949832Disabling compacts and flushes for region at 1733691949832Disabling writes for close at 1733691949833 (+1 ms)Obtaining lock to block concurrent updates at 1733691949841 (+8 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733691949841Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7802, getHeapSize=11424, getOffHeapSize=0, getCellsCount=35 at 1733691949850 (+9 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733691949852 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733691949854 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733691949900 (+46 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733691949903 (+3 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733691950363 (+460 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733691950380 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733691950380Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733691950398 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733691950414 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733691950414Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a3f8ed7: reopening flushed file at 1733691950831 (+417 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e911c33: reopening flushed file at 1733691950843 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1359c986: reopening flushed file at 1733691950854 (+11 ms)Finished flush of dataSize ~7.62 KB/7802, heapSize ~10.92 KB/11184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1033ms, sequenceid=28, compaction requested=false at 1733691950870 (+16 ms)Writing region close event to WAL at 1733691950872 (+2 ms)Closed at 1733691950872 2024-12-08T21:05:50,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741830_1006 (size=10165) 2024-12-08T21:05:50,881 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.io.IOException: Commit or complete block blk_1073741830_1006, whereas it is under recovery. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.commitOrCompleteLastBlock(BlockManager.java:1211) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.commitOrCompleteLastBlock(FSNamesystem.java:3890) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:731) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:163) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: Commit or complete block blk_1073741830_1006, whereas it is under recovery. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.commitOrCompleteLastBlock(BlockManager.java:1211) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.commitOrCompleteLastBlock(FSNamesystem.java:3890) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:731) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy38.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy39.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 6 more 2024-12-08T21:05:50,882 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220/229bab1f9d30%2C42549%2C1733691945220.1733691947090 2024-12-08T21:05:50,883 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220/229bab1f9d30%2C42549%2C1733691945220.1733691947090 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:951) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220/229bab1f9d30%2C42549%2C1733691945220.1733691947090 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy38.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$recoverLease$24(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.recoverLease(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy39.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:949) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 17 more 2024-12-08T21:05:50,884 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220/229bab1f9d30%2C42549%2C1733691945220.1733691947090 2024-12-08T21:05:50,885 ERROR [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2033): Unable to recover lease after several attempts. Give up. 
java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220/229bab1f9d30%2C42549%2C1733691945220.1733691947090 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:951) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220/229bab1f9d30%2C42549%2C1733691945220.1733691947090 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy38.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$recoverLease$24(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.recoverLease(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy39.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:949) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 16 more 2024-12-08T21:05:50,887 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T21:05:50,887 INFO [M:2;229bab1f9d30:42549 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T21:05:50,888 INFO [M:2;229bab1f9d30:42549 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42549 2024-12-08T21:05:50,892 INFO [M:2;229bab1f9d30:42549 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T21:05:50,967 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 229bab1f9d30:42549 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 229bab1f9d30/172.17.0.2:42549 Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.newConnectException0(Errors.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:131) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Socket.finishConnect(Socket.java:359) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.doFinishConnect(AbstractEpollChannel.java:715) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:692) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T21:05:50,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.FailedServers(52): Added failed server with address 229bab1f9d30:42549 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 229bab1f9d30/172.17.0.2:42549 2024-12-08T21:05:51,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T21:05:51,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42549-0x100074d695d0002, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T21:05:51,049 INFO [M:2;229bab1f9d30:42549 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T21:05:51,070 DEBUG [RS:0;229bab1f9d30:39161 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T21:05:51,072 INFO [HMaster-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45941, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T21:05:51,073 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37411 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:51,177 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37411 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
[last message repeated 25 times by RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37411, at roughly 105 ms intervals between 2024-12-08T21:05:51,281 and 2024-12-08T21:05:53,785, with an identical ServerNotRunningYetException stack trace]
2024-12-08T21:05:53,891 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37411 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:53,911 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220-dead/229bab1f9d30%2C42549%2C1733691945220.1733691947090 after 4007ms 2024-12-08T21:05:53,913 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(328): Renamed hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220-dead/229bab1f9d30%2C42549%2C1733691945220.1733691947090 to hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/229bab1f9d30%2C42549%2C1733691945220.1733691947090 2024-12-08T21:05:53,913 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(330): Delete empty local region wal dir hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,42549,1733691945220-dead 2024-12-08T21:05:53,914 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160 2024-12-08T21:05:53,917 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T21:05:53,920 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C37411%2C1733691945160, suffix=, logDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160, archiveDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/oldWALs, maxLogs=10 2024-12-08T21:05:53,933 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160/229bab1f9d30%2C37411%2C1733691945160.1733691953921, exclude list is [], retry=0 2024-12-08T21:05:53,935 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T21:05:53,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37947,DS-5537ffa1-b6e0-415b-b039-256d9ea9c201,DISK] 2024-12-08T21:05:53,939 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): 
New WAL /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160/229bab1f9d30%2C37411%2C1733691945160.1733691953921 2024-12-08T21:05:53,939 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41417:41417)] 2024-12-08T21:05:53,939 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T21:05:53,940 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T21:05:53,940 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:53,940 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:53,942 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:53,944 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T21:05:53,944 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:53,954 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/32100cae537f4c00b1bd5358796dddda 2024-12-08T21:05:53,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T21:05:53,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc 
of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:53,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T21:05:53,956 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:53,965 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/31fe8ea044344befbdb43d57660eda9e 2024-12-08T21:05:53,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T21:05:53,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:53,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T21:05:53,967 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:53,977 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1968259c3295412fb5306c046334270d 2024-12-08T21:05:53,977 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T21:05:53,977 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:53,979 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T21:05:53,980 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:53,980 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T21:05:53,981 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:53,982 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5516): Found 1 recovered edits file(s) under hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals 2024-12-08T21:05:53,982 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/229bab1f9d30%2C42549%2C1733691945220.1733691947090 2024-12-08T21:05:53,996 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37411 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:53,996 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5793): Applied 0, skipped 37, firstSequenceIdInLog=3, maxSequenceIdInLog=30, path=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/229bab1f9d30%2C42549%2C1733691945220.1733691947090 2024-12-08T21:05:53,997 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/229bab1f9d30%2C42549%2C1733691945220.1733691947090 2024-12-08T21:05:53,999 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:54,000 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:54,000 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
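The lease recovery reported above (util.RecoverLeaseFSUtils, attempt=1 succeeding after ~4 s) boils down to repeatedly asking the HDFS client to recover the lease on the dead master's WAL until the NameNode reports the file closed. A minimal sketch of that underlying call pattern, with an illustrative path and a much shorter polling interval than RecoverLeaseFSUtils actually uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Polls recoverLease until HDFS reports the file closed; path and sleep are illustrative only.
  public static void recover(Configuration conf, Path walFile) throws Exception {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return; // lease recovery only applies to HDFS
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    while (!dfs.recoverLease(walFile)) { // returns true once the file is closed
      Thread.sleep(1000);                // RecoverLeaseFSUtils waits longer, with growing pauses
    }
  }
}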
2024-12-08T21:05:54,002 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:54,006 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-12-08T21:05:54,007 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=31; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59935960, jitterRate=-0.10688459873199463}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T21:05:54,008 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733691953940Initializing all the Stores at 1733691953941 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733691953942 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733691953942Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733691953942Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733691953942Cleaning up temporary data from old regions at 1733691954000 (+58 ms)Region opened successfully at 1733691954008 (+8 ms) 2024-12-08T21:05:54,009 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T21:05:54,010 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@244da434, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=229bab1f9d30/172.17.0.2:0 2024-12-08T21:05:54,020 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(884): The info family in master local region already has data in it, skip migrating... 
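The "Region open journal" above echoes the column-family descriptors of the master local region, e.g. family info with VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1 and an 8 KB block size. Purely for illustration, an equivalent descriptor can be assembled with the public client API; the table name below is a placeholder, since master:store is created internally and is not a user table:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family printed in the region open journal above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();
    // "demo" is a hypothetical table name used only to make the sketch compile.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .build();
  }
}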
2024-12-08T21:05:54,021 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T21:05:54,021 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T21:05:54,021 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T21:05:54,022 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-08T21:05:54,026 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T21:05:54,027 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-08T21:05:54,027 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T21:05:54,032 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.RegionStateStore(171): Load hbase:meta entry region=1588230740, regionState=OPEN, lastHost=229bab1f9d30,39161,1733691945319, regionLocation=229bab1f9d30,39161,1733691945319, openSeqNum=2 2024-12-08T21:05:54,032 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(349): Loaded hbase:meta state=OPEN, location=229bab1f9d30,39161,1733691945319, table=hbase:meta, region=1588230740 2024-12-08T21:05:54,032 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 229bab1f9d30,39161,1733691945319, state=OPEN 2024-12-08T21:05:54,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T21:05:54,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T21:05:54,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T21:05:54,047 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:54,047 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:54,047 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:54,054 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 1 possibly 'live' servers, and 0 'splitting'. 
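The ZKWatcher lines above show each master process receiving a NodeDataChanged event for /hbase/meta-region-server once the new active master rewrites the meta location. A bare-bones sketch of observing that znode with the plain ZooKeeper client; the quorum string mirrors the one in the log but is effectively a placeholder, and the znode payload is protobuf-encoded, so only the event itself is handled here:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MetaZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch changed = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50945", 30000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDataChanged
          && "/hbase/meta-region-server".equals(event.getPath())) {
        changed.countDown();
      }
    });
    // Registers a one-shot data watch; the returned bytes are protobuf-serialized and ignored here.
    zk.getData("/hbase/meta-region-server", true, null);
    changed.await();
    zk.close();
  }
}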
2024-12-08T21:05:54,056 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ServerManager(517): Registering regionserver=229bab1f9d30,39161,1733691945319 2024-12-08T21:05:54,058 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T21:05:54,097 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T21:05:54,098 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T21:05:54,099 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T21:05:54,100 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37411 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:54,105 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T21:05:54,106 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T21:05:54,108 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T21:05:54,114 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T21:05:54,116 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T21:05:54,122 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T21:05:54,127 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of 
znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T21:05:54,139 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T21:05:54,140 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=229bab1f9d30,37411,1733691945160, sessionid=0x100074d695d0001, setting cluster-up flag (Was=true) 2024-12-08T21:05:54,150 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T21:05:54,152 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=229bab1f9d30,37411,1733691945160 2024-12-08T21:05:54,165 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T21:05:54,166 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=229bab1f9d30,37411,1733691945160 2024-12-08T21:05:54,169 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ServerManager(1190): begin to load .lastflushedseqids at hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/.lastflushedseqids 2024-12-08T21:05:54,172 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T21:05:54,172 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
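The StochasticLoadBalancer line above echoes its effective tuning (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000 ms). For reference, those values are normally driven by the stochastic-balancer configuration keys; the key names below are the commonly documented ones and should be verified against this 4.0.0-alpha-1 build before relying on them:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tune() {
    Configuration conf = HBaseConfiguration.create();
    // Values match what the balancer reported loading in the log above.
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
    return conf;
  }
}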
2024-12-08T21:05:54,173 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 229bab1f9d30,37411,1733691945160 Number of backup masters: 1 229bab1f9d30,44649,1733691944456 Number of live region servers: 1 229bab1f9d30,39161,1733691945319 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T21:05:54,175 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T21:05:54,175 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T21:05:54,175 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T21:05:54,175 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T21:05:54,175 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/229bab1f9d30:0, corePoolSize=10, maxPoolSize=10 2024-12-08T21:05:54,175 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:54,175 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/229bab1f9d30:0, corePoolSize=2, maxPoolSize=2 2024-12-08T21:05:54,176 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:54,180 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733691984180 2024-12-08T21:05:54,180 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T21:05:54,180 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T21:05:54,180 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T21:05:54,180 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T21:05:54,180 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T21:05:54,180 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T21:05:54,180 INFO 
[master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:54,181 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T21:05:54,181 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T21:05:54,181 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T21:05:54,181 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T21:05:54,182 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T21:05:54,182 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733691954182,5,FailOnTimeoutGroup] 2024-12-08T21:05:54,182 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733691954182,5,FailOnTimeoutGroup] 2024-12-08T21:05:54,182 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:54,182 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T21:05:54,182 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:54,182 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:54,182 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733691954182, completionTime=-1 2024-12-08T21:05:54,183 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T21:05:54,183 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-08T21:05:54,183 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T21:05:54,183 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=229bab1f9d30,39161,1733691945319, seqNum=-1] 2024-12-08T21:05:54,184 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T21:05:54,185 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60575, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T21:05:54,186 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T21:05:54,187 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733692014187 2024-12-08T21:05:54,187 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733692074187 2024-12-08T21:05:54,187 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 4 msec 2024-12-08T21:05:54,187 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,37411,1733691945160-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:54,187 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,37411,1733691945160-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:54,187 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,37411,1733691945160-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:54,188 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-229bab1f9d30:37411, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:54,188 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:54,188 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:54,190 DEBUG [master/229bab1f9d30:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T21:05:54,193 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 4.762sec 2024-12-08T21:05:54,193 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T21:05:54,193 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 
2024-12-08T21:05:54,193 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T21:05:54,193 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T21:05:54,193 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T21:05:54,193 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,37411,1733691945160-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T21:05:54,193 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,37411,1733691945160-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T21:05:54,197 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T21:05:54,197 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T21:05:54,197 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,37411,1733691945160-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:54,257 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-08T21:05:54,257 INFO [Time-limited test {}] hbase.SingleProcessHBaseCluster(618): Stopping Thread[M:1;229bab1f9d30:37411,5,FailOnTimeoutGroup] 2024-12-08T21:05:54,257 INFO [Time-limited test {}] master.HMaster(3321): ***** STOPPING master '229bab1f9d30,37411,1733691945160' ***** 2024-12-08T21:05:54,257 INFO [Time-limited test {}] master.HMaster(3323): STOPPED: Stopping master 1 2024-12-08T21:05:54,258 INFO [M:1;229bab1f9d30:37411 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T21:05:54,258 INFO [M:1;229bab1f9d30:37411 {}] client.AsyncConnectionImpl(233): Connection has been closed by M:1;229bab1f9d30:37411. 
2024-12-08T21:05:54,258 DEBUG [M:1;229bab1f9d30:37411 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:630) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T21:05:54,258 DEBUG [M:1;229bab1f9d30:37411 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T21:05:54,258 INFO [M:1;229bab1f9d30:37411 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T21:05:54,258 DEBUG [M:1;229bab1f9d30:37411 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T21:05:54,258 DEBUG [M:1;229bab1f9d30:37411 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T21:05:54,258 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-08T21:05:54,258 DEBUG [master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733691954182 {}] cleaner.HFileCleaner(306): Exit Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733691954182,5,FailOnTimeoutGroup] 2024-12-08T21:05:54,258 DEBUG [master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733691954182 {}] cleaner.HFileCleaner(306): Exit Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733691954182,5,FailOnTimeoutGroup] 2024-12-08T21:05:54,259 INFO [M:1;229bab1f9d30:37411 {}] hbase.ChoreService(370): Chore service for: master/229bab1f9d30:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T21:05:54,259 INFO [M:1;229bab1f9d30:37411 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T21:05:54,259 DEBUG [M:1;229bab1f9d30:37411 {}] master.HMaster(1795): Stopping service threads 2024-12-08T21:05:54,259 INFO [M:1;229bab1f9d30:37411 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T21:05:54,259 INFO [M:1;229bab1f9d30:37411 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T21:05:54,259 INFO [M:1;229bab1f9d30:37411 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T21:05:54,259 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-08T21:05:54,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:54,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:54,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:54,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:54,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:54,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:54,288 DEBUG [M:1;229bab1f9d30:37411 {}] zookeeper.ZKUtil(347): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T21:05:54,288 INFO [Time-limited test {}] hbase.LocalHBaseCluster(362): Waiting on 229bab1f9d30,37411,1733691945160 2024-12-08T21:05:54,288 WARN [M:1;229bab1f9d30:37411 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T21:05:54,289 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T21:05:54,289 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T21:05:54,289 DEBUG [zk-event-processor-pool-0 {}] master.ActiveMasterManager(204): No master available. 
Notifying waiting threads 2024-12-08T21:05:54,290 INFO [M:1;229bab1f9d30:37411 {}] master.ServerManager(1134): Rewriting .lastflushedseqids file at: hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/.lastflushedseqids 2024-12-08T21:05:54,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741841_1018 (size=99) 2024-12-08T21:05:54,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:54,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:54,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T21:05:54,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:54,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:05:54,297 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T21:05:54,297 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T21:05:54,298 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/229bab1f9d30,44649,1733691944456 from backup master directory 2024-12-08T21:05:54,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/229bab1f9d30,44649,1733691944456 2024-12-08T21:05:54,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:54,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:54,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T21:05:54,305 WARN [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will 
not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T21:05:54,305 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=229bab1f9d30,44649,1733691944456 2024-12-08T21:05:54,322 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T21:05:54,322 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-08T21:05:54,328 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=1) cost 6ms. 2024-12-08T21:05:54,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741842_1019 (size=196) 2024-12-08T21:05:54,565 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T21:05:54,697 INFO [M:1;229bab1f9d30:37411 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T21:05:54,697 INFO [M:1;229bab1f9d30:37411 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T21:05:54,697 DEBUG [M:1;229bab1f9d30:37411 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T21:05:54,697 INFO [M:1;229bab1f9d30:37411 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:05:54,697 DEBUG [M:1;229bab1f9d30:37411 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:05:54,697 DEBUG [M:1;229bab1f9d30:37411 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T21:05:54,697 DEBUG [M:1;229bab1f9d30:37411 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-08T21:05:54,697 INFO [M:1;229bab1f9d30:37411 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=48 B heapSize=1.12 KB 2024-12-08T21:05:54,714 DEBUG [M:1;229bab1f9d30:37411 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1c0e16ac38b14adca04eff629b90c4bf is 52, key is load_balancer_on/state:d/1733691954255/Put/seqid=0 2024-12-08T21:05:54,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741843_1020 (size=5056) 2024-12-08T21:05:54,746 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T21:05:54,747 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T21:05:54,747 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T21:05:54,755 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(342): old store file tracker DEFAULT is the same with new store file tracker, skip migration 2024-12-08T21:05:54,758 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(316): Renamed hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160 to hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160-dead as it is dead 2024-12-08T21:05:54,759 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160-dead/229bab1f9d30%2C37411%2C1733691945160.1733691953921 2024-12-08T21:05:54,759 WARN [IPC Server 
handler 3 on default port 44471 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160-dead/229bab1f9d30%2C37411%2C1733691945160.1733691953921 has not been closed. Lease recovery is in progress. RecoveryId = 1021 for block blk_1073741840_1017 2024-12-08T21:05:54,759 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160-dead/229bab1f9d30%2C37411%2C1733691945160.1733691953921 after 0ms 2024-12-08T21:05:54,797 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T21:05:54,798 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T21:05:54,799 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T21:05:54,799 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T21:05:54,799 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T21:05:54,799 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-08T21:05:55,120 INFO [M:1;229bab1f9d30:37411 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1c0e16ac38b14adca04eff629b90c4bf 2024-12-08T21:05:55,129 DEBUG [M:1;229bab1f9d30:37411 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1c0e16ac38b14adca04eff629b90c4bf as hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1c0e16ac38b14adca04eff629b90c4bf 2024-12-08T21:05:55,137 INFO [M:1;229bab1f9d30:37411 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1c0e16ac38b14adca04eff629b90c4bf, entries=1, sequenceid=33, filesize=4.9 K 2024-12-08T21:05:55,139 INFO [M:1;229bab1f9d30:37411 {}] regionserver.HRegion(3140): Finished flush of dataSize ~48 B/48, heapSize ~360 B/360, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 441ms, sequenceid=33, compaction requested=false 2024-12-08T21:05:55,140 INFO [M:1;229bab1f9d30:37411 {}] regionserver.HRegion(1973): Closed 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:05:55,140 DEBUG [M:1;229bab1f9d30:37411 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733691954697Disabling compacts and flushes for region at 1733691954697Disabling writes for close at 1733691954697Obtaining lock to block concurrent updates at 1733691954697Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733691954697Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=48, getHeapSize=1080, getOffHeapSize=0, getCellsCount=1 at 1733691954698 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733691954699 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733691954699Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733691954713 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733691954713Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37d63ad8: reopening flushed file at 1733691955127 (+414 ms)Finished flush of dataSize ~48 B/48, heapSize ~360 B/360, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 441ms, sequenceid=33, compaction requested=false at 1733691955139 (+12 ms)Writing region close event to WAL at 1733691955140 (+1 ms)Closed at 1733691955140 2024-12-08T21:05:55,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741840_1017 (size=767) 2024-12-08T21:05:55,144 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.io.IOException: Commit or complete block blk_1073741840_1017, whereas it is under recovery. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.commitOrCompleteLastBlock(BlockManager.java:1211) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.commitOrCompleteLastBlock(FSNamesystem.java:3890) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:731) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:163) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: Commit or complete block blk_1073741840_1017, whereas it is under recovery. 
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.commitOrCompleteLastBlock(BlockManager.java:1211) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.commitOrCompleteLastBlock(FSNamesystem.java:3890) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:731) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy38.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy39.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor2.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 6 more 2024-12-08T21:05:55,144 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160/229bab1f9d30%2C37411%2C1733691945160.1733691953921 2024-12-08T21:05:55,145 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160/229bab1f9d30%2C37411%2C1733691945160.1733691953921 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:951) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160/229bab1f9d30%2C37411%2C1733691945160.1733691953921 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy38.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$recoverLease$24(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.recoverLease(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy39.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:949) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 17 more 2024-12-08T21:05:55,146 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160/229bab1f9d30%2C37411%2C1733691945160.1733691953921 2024-12-08T21:05:55,147 ERROR [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2033): Unable to recover lease after several attempts. Give up. java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160/229bab1f9d30%2C37411%2C1733691945160.1733691953921 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] 
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:951) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160/229bab1f9d30%2C37411%2C1733691945160.1733691953921 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLease(FSNamesystem.java:2869) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.recoverLease(NameNodeRpcServer.java:872) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.recoverLease(ClientNamenodeProtocolServerSideTranslatorPB.java:834) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy38.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$recoverLease$24(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.recoverLease(ClientNamenodeProtocolTranslatorPB.java:685) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy39.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor23.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy42.recoverLease(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:949) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 16 more 2024-12-08T21:05:55,147 INFO [M:1;229bab1f9d30:37411 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T21:05:55,147 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-08T21:05:55,148 INFO [M:1;229bab1f9d30:37411 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37411 2024-12-08T21:05:55,148 INFO [M:1;229bab1f9d30:37411 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T21:05:55,235 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 229bab1f9d30:37411 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 229bab1f9d30/172.17.0.2:37411 Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.newConnectException0(Errors.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:131) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Socket.finishConnect(Socket.java:359) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.doFinishConnect(AbstractEpollChannel.java:715) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:692) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T21:05:55,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.FailedServers(52): Added failed server with address 229bab1f9d30:37411 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 229bab1f9d30/172.17.0.2:37411 2024-12-08T21:05:55,297 INFO [M:1;229bab1f9d30:37411 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T21:05:55,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T21:05:55,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37411-0x100074d695d0001, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T21:05:55,337 DEBUG [RS:0;229bab1f9d30:39161 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T21:05:55,339 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55563, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T21:05:55,339 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:55,443 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:55,546 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:55,651 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:55,757 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:55,865 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:55,970 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:56,073 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:56,178 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:56,285 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:56,392 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:56,500 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:56,608 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:56,715 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:56,822 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:56,926 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:57,030 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:57,135 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:57,243 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:57,348 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:57,452 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:57,555 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:57,658 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:57,761 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:57,865 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:57,969 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:58,073 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:58,177 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:58,280 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:58,383 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:58,487 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:58,591 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:58,697 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:58,761 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160-dead/229bab1f9d30%2C37411%2C1733691945160.1733691953921 after 4002ms 2024-12-08T21:05:58,762 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(328): Renamed hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160-dead/229bab1f9d30%2C37411%2C1733691945160.1733691953921 to hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/229bab1f9d30%2C37411%2C1733691945160.1733691953921 2024-12-08T21:05:58,762 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(330): Delete empty local region wal dir hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,37411,1733691945160-dead 2024-12-08T21:05:58,763 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,44649,1733691944456 2024-12-08T21:05:58,766 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T21:05:58,770 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=229bab1f9d30%2C44649%2C1733691944456, suffix=, 
logDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,44649,1733691944456, archiveDir=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/oldWALs, maxLogs=10 2024-12-08T21:05:58,788 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,44649,1733691944456/229bab1f9d30%2C44649%2C1733691944456.1733691958770, exclude list is [], retry=0 2024-12-08T21:05:58,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37947,DS-5537ffa1-b6e0-415b-b039-256d9ea9c201,DISK] 2024-12-08T21:05:58,797 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/WALs/229bab1f9d30,44649,1733691944456/229bab1f9d30%2C44649%2C1733691944456.1733691958770 2024-12-08T21:05:58,797 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41417:41417)] 2024-12-08T21:05:58,797 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T21:05:58,798 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T21:05:58,798 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:58,798 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:58,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:58,801 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:58,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T21:05:58,803 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:58,818 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/32100cae537f4c00b1bd5358796dddda 2024-12-08T21:05:58,818 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T21:05:58,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:58,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T21:05:58,820 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:58,831 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/31fe8ea044344befbdb43d57660eda9e 2024-12-08T21:05:58,831 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T21:05:58,831 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:58,833 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T21:05:58,833 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:58,845 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1968259c3295412fb5306c046334270d 2024-12-08T21:05:58,845 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T21:05:58,845 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:58,848 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T21:05:58,848 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T21:05:58,859 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1c0e16ac38b14adca04eff629b90c4bf 2024-12-08T21:05:58,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T21:05:58,860 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:58,861 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5516): Found 1 recovered edits file(s) under hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals 2024-12-08T21:05:58,861 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/229bab1f9d30%2C37411%2C1733691945160.1733691953921 2024-12-08T21:05:58,865 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5793): Applied 0, skipped 3, firstSequenceIdInLog=32, maxSequenceIdInLog=35, path=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/229bab1f9d30%2C37411%2C1733691945160.1733691953921 2024-12-08T21:05:58,866 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/229bab1f9d30%2C37411%2C1733691945160.1733691953921 2024-12-08T21:05:58,868 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:58,868 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:58,869 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
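
The FlushLargeStoresPolicy entry above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the master:store descriptor, so the policy falls back to the region memstore flush size divided by the number of families (32.0 M here). As a minimal sketch, assuming a hypothetical user table and family name (only the key name is taken from the log), the same lower bound can be pinned on an ordinary table descriptor:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // 16 MB per-family lower bound before a family qualifies for a selective flush;
        // the key name is copied from the log line above, table/family names are invented.
        long lowerBound = 16L * 1024 * 1024;
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                Long.toString(lowerBound))
            .build();
        System.out.println(td);
      }
    }
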
2024-12-08T21:05:58,870 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T21:05:58,873 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=30 2024-12-08T21:05:58,874 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63692019, jitterRate=-0.050914958119392395}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T21:05:58,875 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733691958798Initializing all the Stores at 1733691958800 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733691958800Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733691958801 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733691958801Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733691958801Cleaning up temporary data from old regions at 1733691958868 (+67 ms)Region opened successfully at 1733691958875 (+7 ms) 2024-12-08T21:05:58,876 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T21:05:58,877 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32a9ed87, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=229bab1f9d30/172.17.0.2:0 2024-12-08T21:05:58,883 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(884): The info family in master local region already has data in it, skip migrating... 
2024-12-08T21:05:58,883 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T21:05:58,883 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T21:05:58,884 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T21:05:58,885 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-08T21:05:58,891 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-08T21:05:58,891 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-08T21:05:58,891 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T21:05:58,897 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.RegionStateStore(171): Load hbase:meta entry region=1588230740, regionState=OPEN, lastHost=229bab1f9d30,39161,1733691945319, regionLocation=229bab1f9d30,39161,1733691945319, openSeqNum=2 2024-12-08T21:05:58,897 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(349): Loaded hbase:meta state=OPEN, location=229bab1f9d30,39161,1733691945319, table=hbase:meta, region=1588230740 2024-12-08T21:05:58,897 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 229bab1f9d30,39161,1733691945319, state=OPEN 2024-12-08T21:05:58,904 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:59,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T21:05:59,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T21:05:59,095 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:59,095 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T21:05:59,100 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 1 possibly 'live' servers, and 0 'splitting'. 2024-12-08T21:05:59,100 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ServerManager(517): Registering regionserver=229bab1f9d30,39161,1733691945319 2024-12-08T21:05:59,198 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:59,268 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-08T21:05:59,277 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T21:05:59,279 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T21:05:59,372 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:59,438 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-08T21:05:59,440 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T21:05:59,442 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T21:05:59,542 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:59,621 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-08T21:05:59,624 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T21:05:59,724 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:59,797 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T21:05:59,804 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T21:05:59,899 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44649 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerReport(MasterRpcServices.java:630) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16716) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-08T21:05:59,909 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T21:05:59,909 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=229bab1f9d30,44649,1733691944456, sessionid=0x100074d695d0000, setting cluster-up flag (Was=true) 2024-12-08T21:05:59,922 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T21:05:59,924 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=229bab1f9d30,44649,1733691944456 2024-12-08T21:05:59,930 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T21:05:59,932 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=229bab1f9d30,44649,1733691944456 2024-12-08T21:05:59,934 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ServerManager(1190): begin to load .lastflushedseqids at hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/.lastflushedseqids 2024-12-08T21:05:59,938 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-08T21:05:59,938 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
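
The /hbase/balancer znode check and the StochasticLoadBalancer configuration above are the state the test named later in this log (TestMasterFailoverBalancerPersistence, visible in the shutdown stack) cares about: whether the balancer switch survives a master failover. A minimal client-side sketch of flipping and reading that switch through the standard Admin API, with connection details assumed and not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class BalancerSwitchSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Turn the balancer off synchronously; the previous switch state is returned.
          boolean previous = admin.balancerSwitch(false, true);
          // After a master failover the switch is expected to still read back as false.
          boolean enabled = admin.isBalancerEnabled();
          System.out.println("previous=" + previous + ", enabled=" + enabled);
        }
      }
    }
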
2024-12-08T21:05:59,938 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 229bab1f9d30,44649,1733691944456 Number of backup masters: 0 Number of live region servers: 1 229bab1f9d30,39161,1733691945319 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T21:05:59,940 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T21:05:59,940 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T21:05:59,941 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T21:05:59,941 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/229bab1f9d30:0, corePoolSize=5, maxPoolSize=5 2024-12-08T21:05:59,941 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/229bab1f9d30:0, corePoolSize=10, maxPoolSize=10 2024-12-08T21:05:59,941 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:59,941 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/229bab1f9d30:0, corePoolSize=2, maxPoolSize=2 2024-12-08T21:05:59,941 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/229bab1f9d30:0, corePoolSize=1, maxPoolSize=1 2024-12-08T21:05:59,942 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733691989942 2024-12-08T21:05:59,942 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T21:05:59,942 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T21:05:59,942 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T21:05:59,942 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T21:05:59,942 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T21:05:59,942 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T21:05:59,942 INFO 
[master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:59,943 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T21:05:59,943 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T21:05:59,943 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T21:05:59,943 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T21:05:59,943 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T21:05:59,943 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733691959943,5,FailOnTimeoutGroup] 2024-12-08T21:05:59,944 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733691959943,5,FailOnTimeoutGroup] 2024-12-08T21:05:59,944 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:59,944 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T21:05:59,944 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:59,944 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:59,944 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733691959944, completionTime=-1 2024-12-08T21:05:59,944 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T21:05:59,944 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-08T21:05:59,945 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-08T21:05:59,945 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=229bab1f9d30,39161,1733691945319, seqNum=-1] 2024-12-08T21:05:59,945 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T21:05:59,947 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38965, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T21:05:59,948 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-08T21:05:59,948 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733692019948 2024-12-08T21:05:59,948 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733692079948 2024-12-08T21:05:59,948 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 4 msec 2024-12-08T21:05:59,949 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,44649,1733691944456-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:59,949 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,44649,1733691944456-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:59,949 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,44649,1733691944456-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:59,949 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-229bab1f9d30:44649, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:59,949 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:59,950 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-08T21:05:59,952 DEBUG [master/229bab1f9d30:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-08T21:05:59,954 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 5.649sec 2024-12-08T21:05:59,955 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T21:05:59,955 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 
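
The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entries come from the master registering periodic tasks with its ChoreService. A rough sketch of that pattern, assuming the public ScheduledChore/ChoreService constructors and a throwaway Stoppable (none of these example names appear in the log):

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("sketch");            // thread-name prefix
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
          @Override protected void chore() {
            // periodic work, analogous to the cleaner/balancer chores in the log
          }
        };
        service.scheduleChore(chore);
        Thread.sleep(3_000);
        service.shutdown();
      }
    }
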
2024-12-08T21:05:59,955 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T21:05:59,955 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T21:05:59,955 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T21:05:59,955 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,44649,1733691944456-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T21:05:59,955 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,44649,1733691944456-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T21:05:59,958 DEBUG [master/229bab1f9d30:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-08T21:05:59,958 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T21:05:59,959 INFO [master/229bab1f9d30:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=229bab1f9d30,44649,1733691944456-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T21:06:00,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-08T21:06:00,003 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-08T21:06:00,003 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.master.TestMasterFailoverBalancerPersistence.testMasterFailoverBalancerPersistence(TestMasterFailoverBalancerPersistence.java:81) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T21:06:00,003 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T21:06:00,004 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T21:06:00,004 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
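
The call stack above places the shutdown inside TestMasterFailoverBalancerPersistence.testMasterFailoverBalancerPersistence, driven through HBaseTestingUtil.shutdownMiniCluster. A sketch of that test shape, with the body structure assumed rather than copied from the actual test source:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      @Test
      public void exerciseMiniCluster() throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();          // brings up ZK, HDFS and an HBase master + regionserver
        try {
          // ... e.g. toggle the balancer, kill the active master, wait for failover ...
        } finally {
          util.shutdownMiniCluster();     // the call visible in the stack trace above
        }
      }
    }
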
2024-12-08T21:06:00,004 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T21:06:00,004 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1363448807, stopped=false 2024-12-08T21:06:00,005 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=229bab1f9d30,44649,1733691944456 2024-12-08T21:06:00,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T21:06:00,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T21:06:00,037 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T21:06:00,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:06:00,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:06:00,037 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-08T21:06:00,038 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.master.TestMasterFailoverBalancerPersistence.testMasterFailoverBalancerPersistence(TestMasterFailoverBalancerPersistence.java:81) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T21:06:00,038 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T21:06:00,038 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '229bab1f9d30,39161,1733691945319' ***** 2024-12-08T21:06:00,038 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-08T21:06:00,038 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T21:06:00,038 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T21:06:00,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T21:06:00,039 INFO [RS:0;229bab1f9d30:39161 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T21:06:00,039 INFO [RS:0;229bab1f9d30:39161 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T21:06:00,039 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(959): stopping server 229bab1f9d30,39161,1733691945319 2024-12-08T21:06:00,039 INFO [RS:0;229bab1f9d30:39161 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T21:06:00,039 INFO [RS:0;229bab1f9d30:39161 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;229bab1f9d30:39161. 
2024-12-08T21:06:00,039 DEBUG [RS:0;229bab1f9d30:39161 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T21:06:00,039 DEBUG [RS:0;229bab1f9d30:39161 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T21:06:00,039 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T21:06:00,039 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T21:06:00,039 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-08T21:06:00,040 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-08T21:06:00,040 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-08T21:06:00,040 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-08T21:06:00,041 DEBUG [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-08T21:06:00,041 DEBUG [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-08T21:06:00,041 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-08T21:06:00,041 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-08T21:06:00,041 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-08T21:06:00,041 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T21:06:00,041 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T21:06:00,042 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-08T21:06:00,072 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/data/hbase/meta/1588230740/.tmp/ns/88c309a79bd84e659fbf1e53b1101f56 is 43, key is default/ns:d/1733691949188/Put/seqid=0 2024-12-08T21:06:00,079 INFO [regionserver/229bab1f9d30:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T21:06:00,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741845_1023 (size=5153) 2024-12-08T21:06:00,087 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/data/hbase/meta/1588230740/.tmp/ns/88c309a79bd84e659fbf1e53b1101f56 2024-12-08T21:06:00,104 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/data/hbase/meta/1588230740/.tmp/ns/88c309a79bd84e659fbf1e53b1101f56 as hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/data/hbase/meta/1588230740/ns/88c309a79bd84e659fbf1e53b1101f56 2024-12-08T21:06:00,117 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/data/hbase/meta/1588230740/ns/88c309a79bd84e659fbf1e53b1101f56, 
entries=2, sequenceid=6, filesize=5.0 K 2024-12-08T21:06:00,119 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 78ms, sequenceid=6, compaction requested=false 2024-12-08T21:06:00,126 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-08T21:06:00,128 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T21:06:00,128 INFO [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-08T21:06:00,128 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733691960041Running coprocessor pre-close hooks at 1733691960041Disabling compacts and flushes for region at 1733691960041Disabling writes for close at 1733691960041Obtaining lock to block concurrent updates at 1733691960042 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733691960042Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733691960042Flushing stores of hbase:meta,,1.1588230740 at 1733691960043 (+1 ms)Flushing 1588230740/ns: creating writer at 1733691960044 (+1 ms)Flushing 1588230740/ns: appending metadata at 1733691960071 (+27 ms)Flushing 1588230740/ns: closing flushed file at 1733691960071Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3359ff16: reopening flushed file at 1733691960102 (+31 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 78ms, sequenceid=6, compaction requested=false at 1733691960119 (+17 ms)Writing region close event to WAL at 1733691960120 (+1 ms)Running coprocessor post-close hooks at 1733691960127 (+7 ms)Closed at 1733691960128 (+1 ms) 2024-12-08T21:06:00,129 DEBUG [RS_CLOSE_META-regionserver/229bab1f9d30:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T21:06:00,241 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.HRegionServer(976): stopping server 229bab1f9d30,39161,1733691945319; all regions closed. 
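
The close journal above shows hbase:meta flushing its small ns memstore (74 B at sequenceid=6) on the way down. For comparison, the same kind of flush can be requested explicitly from a client; a minimal sketch, with connection setup assumed and unrelated to the internal shutdown path logged here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MetaFlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the regionserver hosting hbase:meta to flush its memstores to HFiles.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }
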
2024-12-08T21:06:00,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741834_1010 (size=1152) 2024-12-08T21:06:00,253 DEBUG [RS:0;229bab1f9d30:39161 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/oldWALs 2024-12-08T21:06:00,253 INFO [RS:0;229bab1f9d30:39161 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 229bab1f9d30%2C39161%2C1733691945319.meta:.meta(num 1733691949022) 2024-12-08T21:06:00,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741832_1008 (size=93) 2024-12-08T21:06:00,261 DEBUG [RS:0;229bab1f9d30:39161 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/oldWALs 2024-12-08T21:06:00,261 INFO [RS:0;229bab1f9d30:39161 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 229bab1f9d30%2C39161%2C1733691945319:(num 1733691948117) 2024-12-08T21:06:00,261 DEBUG [RS:0;229bab1f9d30:39161 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T21:06:00,261 INFO [RS:0;229bab1f9d30:39161 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T21:06:00,261 INFO [RS:0;229bab1f9d30:39161 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T21:06:00,261 INFO [RS:0;229bab1f9d30:39161 {}] hbase.ChoreService(370): Chore service for: regionserver/229bab1f9d30:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-08T21:06:00,261 INFO [RS:0;229bab1f9d30:39161 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T21:06:00,261 INFO [regionserver/229bab1f9d30:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T21:06:00,261 INFO [RS:0;229bab1f9d30:39161 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39161 2024-12-08T21:06:00,279 INFO [RS:0;229bab1f9d30:39161 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T21:06:00,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T21:06:00,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/229bab1f9d30,39161,1733691945319 2024-12-08T21:06:00,288 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [229bab1f9d30,39161,1733691945319] 2024-12-08T21:06:00,296 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/229bab1f9d30,39161,1733691945319 already deleted, retry=false 2024-12-08T21:06:00,296 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 229bab1f9d30,39161,1733691945319 expired; onlineServers=0 2024-12-08T21:06:00,296 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '229bab1f9d30,44649,1733691944456' ***** 2024-12-08T21:06:00,296 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T21:06:00,296 INFO [M:0;229bab1f9d30:44649 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-08T21:06:00,296 INFO [M:0;229bab1f9d30:44649 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-08T21:06:00,296 DEBUG [M:0;229bab1f9d30:44649 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T21:06:00,297 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-08T21:06:00,297 DEBUG [M:0;229bab1f9d30:44649 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T21:06:00,297 DEBUG [master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733691959943 {}] cleaner.HFileCleaner(306): Exit Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.large.0-1733691959943,5,FailOnTimeoutGroup] 2024-12-08T21:06:00,297 DEBUG [master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733691959943 {}] cleaner.HFileCleaner(306): Exit Thread[master/229bab1f9d30:0:becomeActiveMaster-HFileCleaner.small.0-1733691959943,5,FailOnTimeoutGroup] 2024-12-08T21:06:00,297 INFO [M:0;229bab1f9d30:44649 {}] hbase.ChoreService(370): Chore service for: master/229bab1f9d30:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-08T21:06:00,297 INFO [M:0;229bab1f9d30:44649 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-08T21:06:00,297 DEBUG [M:0;229bab1f9d30:44649 {}] master.HMaster(1795): Stopping service threads 2024-12-08T21:06:00,297 INFO [M:0;229bab1f9d30:44649 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T21:06:00,297 INFO [M:0;229bab1f9d30:44649 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-08T21:06:00,297 INFO [M:0;229bab1f9d30:44649 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T21:06:00,297 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-08T21:06:00,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T21:06:00,304 DEBUG [M:0;229bab1f9d30:44649 {}] zookeeper.ZKUtil(347): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T21:06:00,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T21:06:00,304 WARN [M:0;229bab1f9d30:44649 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T21:06:00,305 INFO [M:0;229bab1f9d30:44649 {}] master.ServerManager(1134): Rewriting .lastflushedseqids file at: hdfs://localhost:44471/user/jenkins/test-data/2ff44ddd-b1ff-5121-5db5-0ddcd2992ca0/.lastflushedseqids 2024-12-08T21:06:00,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741846_1024 (size=99) 2024-12-08T21:06:00,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T21:06:00,388 INFO [RS:0;229bab1f9d30:39161 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T21:06:00,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39161-0x100074d695d0003, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T21:06:00,388 INFO [RS:0;229bab1f9d30:39161 {}] 
regionserver.HRegionServer(1031): Exiting; stopping=229bab1f9d30,39161,1733691945319; zookeeper connection closed. 2024-12-08T21:06:00,389 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@383c40a8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@383c40a8 2024-12-08T21:06:00,389 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T21:06:00,712 INFO [M:0;229bab1f9d30:44649 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-08T21:06:00,712 INFO [M:0;229bab1f9d30:44649 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T21:06:00,712 DEBUG [M:0;229bab1f9d30:44649 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T21:06:00,713 INFO [M:0;229bab1f9d30:44649 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:06:00,713 DEBUG [M:0;229bab1f9d30:44649 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:06:00,713 DEBUG [M:0;229bab1f9d30:44649 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T21:06:00,713 DEBUG [M:0;229bab1f9d30:44649 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:06:00,714 INFO [M:0;229bab1f9d30:44649 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T21:06:00,714 DEBUG [M:0;229bab1f9d30:44649 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733691960712Disabling compacts and flushes for region at 1733691960712Disabling writes for close at 1733691960713 (+1 ms)Writing region close event to WAL at 1733691960714 (+1 ms)Closed at 1733691960714 2024-12-08T21:06:00,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37947 is added to blk_1073741844_1022 (size=93) 2024-12-08T21:06:00,718 INFO [M:0;229bab1f9d30:44649 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-08T21:06:00,718 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-08T21:06:00,718 INFO [M:0;229bab1f9d30:44649 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44649 2024-12-08T21:06:00,718 INFO [M:0;229bab1f9d30:44649 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-08T21:06:00,846 INFO [M:0;229bab1f9d30:44649 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-08T21:06:00,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T21:06:00,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44649-0x100074d695d0000, quorum=127.0.0.1:50945, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T21:06:00,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7182828b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T21:06:00,855 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11857d05{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T21:06:00,855 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T21:06:00,855 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e882389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T21:06:00,856 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f4c4215{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/hadoop.log.dir/,STOPPED} 2024-12-08T21:06:00,860 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T21:06:00,860 WARN [BP-814579981-172.17.0.2-1733691940979 heartbeating to localhost/127.0.0.1:44471 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T21:06:00,860 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T21:06:00,860 WARN [BP-814579981-172.17.0.2-1733691940979 heartbeating to localhost/127.0.0.1:44471 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-814579981-172.17.0.2-1733691940979 (Datanode Uuid e6fa4c1f-8ff3-45f3-a7e5-f8e254ade171) service to localhost/127.0.0.1:44471 2024-12-08T21:06:00,862 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/cluster_9acabe47-1c98-0432-8392-c1b14ea2a8ac/data/data1/current/BP-814579981-172.17.0.2-1733691940979 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T21:06:00,862 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/cluster_9acabe47-1c98-0432-8392-c1b14ea2a8ac/data/data2/current/BP-814579981-172.17.0.2-1733691940979 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T21:06:00,863 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T21:06:00,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26c59a36{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T21:06:00,875 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40c01bb1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T21:06:00,875 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T21:06:00,875 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61fd4728{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T21:06:00,875 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d13f332{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cf629df9-c31b-0364-7463-e2ec46277658/hadoop.log.dir/,STOPPED} 2024-12-08T21:06:00,881 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-08T21:06:00,897 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-08T21:06:00,904 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: master.TestMasterFailoverBalancerPersistence#testMasterFailoverBalancerPersistence Thread=70 (was 11) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (301154381) connection to localhost/127.0.0.1:44471 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44471 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@403eae7d java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: process reaper java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:44471 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44471 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44471 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-4-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: process reaper java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (301154381) connection to localhost/127.0.0.1:44471 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/229bab1f9d30:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44471 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: IPC Client (301154381) connection to localhost/127.0.0.1:44471 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: regionserver/229bab1f9d30:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: master/229bab1f9d30:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=383 (was 285) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=190 (was 171) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=16335 (was 16731)
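The ResourceChecker summary above compares resource counts captured before the test with the same counts taken after it (Thread=70 (was 11), OpenFileDescriptor=383 (was 285), and so on) and appears to attach a "LEAK?" marker to metrics that grew during the run, listing the stack of each surviving thread as a potential leak. The short Java sketch below illustrates that before/after snapshot pattern using only standard JDK APIs; it is an illustrative sketch under those assumptions, not HBase's ResourceChecker implementation, and the class ResourceSnapshotExample, its Snapshot record, and the take()/report() helpers are hypothetical names introduced here. JVM free memory stands in for the report's system-level AvailableMemoryMB.

// Illustrative sketch only: mimics the "X=n (was m) - X LEAK?" style of the report above
// with plain JDK APIs. Not HBase code; names are hypothetical.
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

public class ResourceSnapshotExample {

    /** Values compared before and after the test body. */
    record Snapshot(int threadCount, long freeMemoryMb) { }

    /** Takes a snapshot of the current live-thread count and JVM free memory. */
    static Snapshot take() {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        long freeMb = Runtime.getRuntime().freeMemory() / (1024 * 1024);
        return new Snapshot(threads.getThreadCount(), freeMb);
    }

    /** Prints a report in the same "X=n (was m)" shape as the log, flagging thread growth. */
    static void report(Snapshot before, Snapshot after) {
        System.out.printf("Thread=%d (was %d)%s%n",
                after.threadCount(), before.threadCount(),
                after.threadCount() > before.threadCount() ? " - Thread LEAK?" : "");
        System.out.printf("AvailableMemoryMB=%d (was %d)%n",
                after.freeMemoryMb(), before.freeMemoryMb());
    }

    public static void main(String[] args) throws Exception {
        Snapshot before = take();

        // Stand-in for the test body: start and join a short-lived background thread.
        Thread worker = new Thread(() -> {
            try { Thread.sleep(100); } catch (InterruptedException ignored) { }
        }, "example-worker");
        worker.start();
        worker.join();

        report(before, take());
    }
}

Because the worker thread has finished and been joined before the second snapshot, running this typically prints no leak marker; a thread that was started but never stopped would instead raise the after-count and produce the same kind of "Thread LEAK?" line seen in the report above.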