2024-12-09 16:56:09,554 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-12-09 16:56:09,564 main DEBUG Took 0.008962 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 16:56:09,565 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 16:56:09,565 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 16:56:09,566 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 16:56:09,567 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 16:56:09,579 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 16:56:09,593 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,594 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 16:56:09,595 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,595 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 16:56:09,595 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,596 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 16:56:09,596 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,596 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 16:56:09,597 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,597 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 16:56:09,598 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,598 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 16:56:09,598 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,598 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 16:56:09,599 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,599 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 16:56:09,599 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,600 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 16:56:09,600 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,600 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 16:56:09,601 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,601 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 16:56:09,601 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,601 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 16:56:09,602 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,602 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 16:56:09,603 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 16:56:09,604 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 16:56:09,606 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 16:56:09,606 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-09 16:56:09,607 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 16:56:09,608 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 16:56:09,614 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 16:56:09,617 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 16:56:09,618 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 16:56:09,619 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 16:56:09,619 main DEBUG createAppenders(={Console}) 2024-12-09 16:56:09,620 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a initialized 2024-12-09 16:56:09,620 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-12-09 16:56:09,620 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a OK. 2024-12-09 16:56:09,620 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 16:56:09,621 main DEBUG OutputStream closed 2024-12-09 16:56:09,621 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 16:56:09,621 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 16:56:09,621 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@21e360a OK 2024-12-09 16:56:09,681 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 16:56:09,684 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 16:56:09,685 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 16:56:09,686 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 16:56:09,687 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 16:56:09,687 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 16:56:09,687 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 16:56:09,688 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 16:56:09,688 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 16:56:09,688 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 16:56:09,688 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 16:56:09,689 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 16:56:09,689 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 16:56:09,689 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 16:56:09,689 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 16:56:09,690 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 16:56:09,690 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 16:56:09,691 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 16:56:09,692 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 16:56:09,693 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@42b02722) with optional ClassLoader: null 2024-12-09 16:56:09,693 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 16:56:09,694 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@42b02722] started OK. 2024-12-09T16:56:09,704 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle timeout: 13 mins 2024-12-09 16:56:09,707 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 16:56:09,707 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-09T16:56:09,958 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21 2024-12-09T16:56:09,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=2, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T16:56:09,985 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/cluster_11d82abb-857b-0c7e-56cd-942d2bbc8a40, deleteOnExit=true 2024-12-09T16:56:09,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T16:56:09,986 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/test.cache.data in system properties and HBase conf 2024-12-09T16:56:09,987 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T16:56:09,987 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/hadoop.log.dir in system properties and HBase conf 2024-12-09T16:56:09,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T16:56:09,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T16:56:09,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T16:56:10,069 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T16:56:10,157 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T16:56:10,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T16:56:10,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T16:56:10,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T16:56:10,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T16:56:10,163 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T16:56:10,163 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T16:56:10,164 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T16:56:10,164 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T16:56:10,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T16:56:10,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/nfs.dump.dir in system properties and HBase conf 2024-12-09T16:56:10,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/java.io.tmpdir in system properties and HBase conf 2024-12-09T16:56:10,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T16:56:10,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T16:56:10,167 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T16:56:11,283 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T16:56:11,522 INFO [Time-limited test {}] log.Log(170): Logging initialized @2585ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T16:56:11,659 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:56:11,819 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:56:11,921 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:56:11,923 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:56:11,925 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T16:56:11,957 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:56:11,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc391a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:56:11,979 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13ffc098{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:56:12,356 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64e450a9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/java.io.tmpdir/jetty-localhost-41891-hadoop-hdfs-3_4_1-tests_jar-_-any-6019356285647650145/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T16:56:12,387 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f72973e{HTTP/1.1, (http/1.1)}{localhost:41891} 2024-12-09T16:56:12,389 INFO [Time-limited test {}] server.Server(415): Started @3451ms 2024-12-09T16:56:13,349 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:56:13,369 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:56:13,396 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:56:13,396 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:56:13,397 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T16:56:13,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cb3ceaf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:56:13,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b94fc88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:56:13,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f34895c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/java.io.tmpdir/jetty-localhost-42009-hadoop-hdfs-3_4_1-tests_jar-_-any-15110938190833568400/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:56:13,572 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@695fa2ad{HTTP/1.1, (http/1.1)}{localhost:42009} 2024-12-09T16:56:13,572 INFO [Time-limited test {}] server.Server(415): Started @4636ms 2024-12-09T16:56:13,682 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T16:56:13,974 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:56:13,982 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:56:14,038 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:56:14,038 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:56:14,038 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T16:56:14,040 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5547b1d3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:56:14,041 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ffa6420{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:56:14,210 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@546ff28f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/java.io.tmpdir/jetty-localhost-34727-hadoop-hdfs-3_4_1-tests_jar-_-any-16454974621509242386/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:56:14,212 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1852a3e1{HTTP/1.1, (http/1.1)}{localhost:34727} 2024-12-09T16:56:14,212 INFO [Time-limited test {}] server.Server(415): Started @5276ms 2024-12-09T16:56:14,216 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T16:56:15,248 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/cluster_11d82abb-857b-0c7e-56cd-942d2bbc8a40/data/data1/current/BP-749996034-172.17.0.2-1733763370643/current, will proceed with Du for space computation calculation, 2024-12-09T16:56:15,249 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/cluster_11d82abb-857b-0c7e-56cd-942d2bbc8a40/data/data2/current/BP-749996034-172.17.0.2-1733763370643/current, will proceed with Du for space computation calculation, 2024-12-09T16:56:15,249 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/cluster_11d82abb-857b-0c7e-56cd-942d2bbc8a40/data/data3/current/BP-749996034-172.17.0.2-1733763370643/current, will proceed with Du for space computation calculation, 2024-12-09T16:56:15,250 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/cluster_11d82abb-857b-0c7e-56cd-942d2bbc8a40/data/data4/current/BP-749996034-172.17.0.2-1733763370643/current, will proceed with Du for space computation calculation, 2024-12-09T16:56:15,310 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T16:56:15,357 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T16:56:15,409 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96af43e8e5e8b736 with lease ID 0x9006595a8c0047ac: Processing first storage report for DS-0ead2a8b-c0ef-4a72-b147-a2a8eecbb657 from datanode DatanodeRegistration(127.0.0.1:32855, datanodeUuid=33066ccb-b9e7-48ff-8d2e-71ab2517cb39, infoPort=37237, infoSecurePort=0, ipcPort=34953, storageInfo=lv=-57;cid=testClusterID;nsid=2137487185;c=1733763370643) 2024-12-09T16:56:15,410 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96af43e8e5e8b736 with lease ID 0x9006595a8c0047ac: from storage DS-0ead2a8b-c0ef-4a72-b147-a2a8eecbb657 node DatanodeRegistration(127.0.0.1:32855, datanodeUuid=33066ccb-b9e7-48ff-8d2e-71ab2517cb39, infoPort=37237, infoSecurePort=0, ipcPort=34953, storageInfo=lv=-57;cid=testClusterID;nsid=2137487185;c=1733763370643), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-09T16:56:15,411 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc62183d1b8b4bb8e with lease ID 0x9006595a8c0047ab: Processing first storage report for DS-e0df493e-6ab7-4aa1-86df-3f605eb019bb from datanode DatanodeRegistration(127.0.0.1:39355, datanodeUuid=60a74b0c-cca6-4717-9239-01383c2c5f4c, infoPort=33341, infoSecurePort=0, ipcPort=38443, storageInfo=lv=-57;cid=testClusterID;nsid=2137487185;c=1733763370643) 2024-12-09T16:56:15,411 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc62183d1b8b4bb8e with lease ID 0x9006595a8c0047ab: from storage DS-e0df493e-6ab7-4aa1-86df-3f605eb019bb node DatanodeRegistration(127.0.0.1:39355, datanodeUuid=60a74b0c-cca6-4717-9239-01383c2c5f4c, infoPort=33341, infoSecurePort=0, ipcPort=38443, storageInfo=lv=-57;cid=testClusterID;nsid=2137487185;c=1733763370643), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:56:15,411 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96af43e8e5e8b736 with lease ID 0x9006595a8c0047ac: Processing first storage report for DS-2aa10e90-6085-4746-9f41-da202ac07619 from datanode DatanodeRegistration(127.0.0.1:32855, datanodeUuid=33066ccb-b9e7-48ff-8d2e-71ab2517cb39, infoPort=37237, infoSecurePort=0, ipcPort=34953, storageInfo=lv=-57;cid=testClusterID;nsid=2137487185;c=1733763370643) 2024-12-09T16:56:15,411 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96af43e8e5e8b736 with lease ID 0x9006595a8c0047ac: from storage DS-2aa10e90-6085-4746-9f41-da202ac07619 node DatanodeRegistration(127.0.0.1:32855, datanodeUuid=33066ccb-b9e7-48ff-8d2e-71ab2517cb39, infoPort=37237, infoSecurePort=0, ipcPort=34953, storageInfo=lv=-57;cid=testClusterID;nsid=2137487185;c=1733763370643), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:56:15,418 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc62183d1b8b4bb8e with lease ID 0x9006595a8c0047ab: Processing first storage report for DS-c3b55359-240c-4ae0-bc9e-6d865d17db74 from datanode DatanodeRegistration(127.0.0.1:39355, datanodeUuid=60a74b0c-cca6-4717-9239-01383c2c5f4c, infoPort=33341, infoSecurePort=0, ipcPort=38443, storageInfo=lv=-57;cid=testClusterID;nsid=2137487185;c=1733763370643) 2024-12-09T16:56:15,419 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xc62183d1b8b4bb8e with lease ID 0x9006595a8c0047ab: from storage DS-c3b55359-240c-4ae0-bc9e-6d865d17db74 node DatanodeRegistration(127.0.0.1:39355, datanodeUuid=60a74b0c-cca6-4717-9239-01383c2c5f4c, infoPort=33341, infoSecurePort=0, ipcPort=38443, storageInfo=lv=-57;cid=testClusterID;nsid=2137487185;c=1733763370643), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T16:56:15,452 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21 2024-12-09T16:56:15,611 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/cluster_11d82abb-857b-0c7e-56cd-942d2bbc8a40/zookeeper_0, clientPort=54803, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/cluster_11d82abb-857b-0c7e-56cd-942d2bbc8a40/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/cluster_11d82abb-857b-0c7e-56cd-942d2bbc8a40/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T16:56:15,636 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54803 2024-12-09T16:56:15,655 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:56:15,658 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:56:16,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741825_1001 (size=7) 2024-12-09T16:56:16,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741825_1001 (size=7) 2024-12-09T16:56:16,483 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd with version=8 2024-12-09T16:56:16,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/hbase-staging 2024-12-09T16:56:16,663 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-09T16:56:17,030 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7b4ec1980089:0 server-side Connection retries=45 2024-12-09T16:56:17,042 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:56:17,043 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T16:56:17,067 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T16:56:17,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:56:17,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T16:56:17,260 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T16:56:17,352 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T16:56:17,367 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T16:56:17,375 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T16:56:17,421 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 108273 (auto-detected) 2024-12-09T16:56:17,422 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-09T16:56:17,469 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36647 2024-12-09T16:56:17,498 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36647 connecting to ZooKeeper ensemble=127.0.0.1:54803 2024-12-09T16:56:17,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:366470x0, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T16:56:17,670 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36647-0x1000b8f605d0000 connected 2024-12-09T16:56:17,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:56:17,776 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:56:17,791 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:56:17,795 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd, hbase.cluster.distributed=false 2024-12-09T16:56:17,826 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T16:56:17,859 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, 
port=36647 2024-12-09T16:56:17,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36647 2024-12-09T16:56:17,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36647 2024-12-09T16:56:17,871 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36647 2024-12-09T16:56:17,871 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36647 2024-12-09T16:56:18,067 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7b4ec1980089:0 server-side Connection retries=45 2024-12-09T16:56:18,070 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:56:18,072 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T16:56:18,073 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T16:56:18,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:56:18,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T16:56:18,080 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T16:56:18,097 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T16:56:18,106 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34213 2024-12-09T16:56:18,108 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34213 connecting to ZooKeeper ensemble=127.0.0.1:54803 2024-12-09T16:56:18,118 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:56:18,122 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:56:18,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:342130x0, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T16:56:18,165 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:342130x0, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:56:18,176 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:34213-0x1000b8f605d0001 connected 2024-12-09T16:56:18,179 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T16:56:18,197 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T16:56:18,203 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T16:56:18,210 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T16:56:18,218 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34213 2024-12-09T16:56:18,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34213 2024-12-09T16:56:18,234 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34213 2024-12-09T16:56:18,263 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34213 2024-12-09T16:56:18,264 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34213 2024-12-09T16:56:18,303 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7b4ec1980089:0 server-side Connection retries=45 2024-12-09T16:56:18,304 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:56:18,304 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T16:56:18,305 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T16:56:18,305 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:56:18,306 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T16:56:18,306 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T16:56:18,309 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T16:56:18,312 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36153 2024-12-09T16:56:18,316 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36153 connecting to ZooKeeper ensemble=127.0.0.1:54803 2024-12-09T16:56:18,319 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:56:18,326 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:56:18,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:361530x0, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T16:56:18,388 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:361530x0, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:56:18,389 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T16:56:18,388 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36153-0x1000b8f605d0002 connected 2024-12-09T16:56:18,405 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T16:56:18,408 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T16:56:18,419 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T16:56:18,426 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36153 2024-12-09T16:56:18,427 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36153 2024-12-09T16:56:18,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36153 2024-12-09T16:56:18,447 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36153 2024-12-09T16:56:18,459 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36153 2024-12-09T16:56:18,508 DEBUG [M:0;7b4ec1980089:36647 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7b4ec1980089:36647 2024-12-09T16:56:18,516 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7b4ec1980089,36647,1733763376785 2024-12-09T16:56:18,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:56:18,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:56:18,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:56:18,566 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7b4ec1980089,36647,1733763376785 2024-12-09T16:56:18,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T16:56:18,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:18,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T16:56:18,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:18,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:18,643 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T16:56:18,648 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7b4ec1980089,36647,1733763376785 from backup master directory 2024-12-09T16:56:18,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:56:18,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7b4ec1980089,36647,1733763376785 2024-12-09T16:56:18,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:56:18,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:56:18,663 WARN [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T16:56:18,663 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7b4ec1980089,36647,1733763376785 2024-12-09T16:56:18,665 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T16:56:18,671 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T16:56:18,778 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/hbase.id] with ID: 69b0da9d-401b-4853-8997-55479f4db3e3 2024-12-09T16:56:18,778 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/.tmp/hbase.id 2024-12-09T16:56:18,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741826_1002 (size=42) 2024-12-09T16:56:18,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741826_1002 (size=42) 2024-12-09T16:56:18,836 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/.tmp/hbase.id]:[hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/hbase.id] 2024-12-09T16:56:18,940 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:56:18,948 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T16:56:18,978 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 27ms. 
2024-12-09T16:56:19,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:19,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:19,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:19,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741827_1003 (size=196) 2024-12-09T16:56:19,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741827_1003 (size=196) 2024-12-09T16:56:19,116 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T16:56:19,118 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T16:56:19,133 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at java.lang.Class.forName0(Native Method) ~[?:?]
at java.lang.Class.forName(Class.java:375) ~[?:?]
at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:56:19,137 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T16:56:19,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741828_1004 (size=1189) 2024-12-09T16:56:19,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741828_1004 (size=1189) 2024-12-09T16:56:19,203 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store 2024-12-09T16:56:19,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741829_1005 (size=34) 2024-12-09T16:56:19,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741829_1005 (size=34) 2024-12-09T16:56:19,246 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-09T16:56:19,256 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:19,258 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T16:56:19,258 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
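Editorial note: the 'master:store' descriptor dumped above maps onto HBase's public descriptor builder API. The fragment below is only an illustrative sketch of how a table with an equivalent 'info' family could be declared through that API; it is not how MasterRegionFactory builds the real descriptor, and the table name is reused purely for illustration.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the logged 'info' family: VERSIONS=3, IN_MEMORY=true, ROW_INDEX_V1 encoding,
        // ROWCOL bloom filter, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store")) // namespace "master", qualifier "store"
            .setColumnFamily(info)
            .build();

        System.out.println(td);
      }
    }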
2024-12-09T16:56:19,259 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:56:19,261 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T16:56:19,261 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:56:19,261 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:56:19,263 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733763379258Disabling compacts and flushes for region at 1733763379258Disabling writes for close at 1733763379261 (+3 ms)Writing region close event to WAL at 1733763379261Closed at 1733763379261 2024-12-09T16:56:19,266 WARN [master/7b4ec1980089:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/.initializing 2024-12-09T16:56:19,266 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/WALs/7b4ec1980089,36647,1733763376785 2024-12-09T16:56:19,282 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T16:56:19,315 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b4ec1980089%2C36647%2C1733763376785, suffix=, logDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/WALs/7b4ec1980089,36647,1733763376785, archiveDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/oldWALs, maxLogs=10 2024-12-09T16:56:19,360 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/WALs/7b4ec1980089,36647,1733763376785/7b4ec1980089%2C36647%2C1733763376785.1733763379330, exclude list is [], retry=0 2024-12-09T16:56:19,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39355,DS-e0df493e-6ab7-4aa1-86df-3f605eb019bb,DISK] 2024-12-09T16:56:19,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
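Editorial note: in the AbstractFSWAL configuration line above, rollsize (128 MB) is exactly half of blocksize (256 MB), which matches the default 0.5 log-roll multiplier. A minimal sketch of that relationship, assuming stock hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs semantics:

    // Sketch: how the logged WAL roll size follows from the WAL block size (assumed default multiplier 0.5).
    public class WalRollSize {
      public static void main(String[] args) {
        long walBlockSize = 256L * 1024 * 1024;   // blocksize=256 MB from the log
        double rollMultiplier = 0.5;              // hbase.regionserver.logroll.multiplier default
        long rollSize = (long) (walBlockSize * rollMultiplier);
        int maxLogs = 10;                         // maxLogs=10 from the log (hbase.regionserver.maxlogs)

        System.out.println("rollsize=" + (rollSize >> 20) + " MB, maxLogs=" + maxLogs); // -> 128 MB, 10
      }
    }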
2024-12-09T16:56:19,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32855,DS-0ead2a8b-c0ef-4a72-b147-a2a8eecbb657,DISK] 2024-12-09T16:56:19,472 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/WALs/7b4ec1980089,36647,1733763376785/7b4ec1980089%2C36647%2C1733763376785.1733763379330 2024-12-09T16:56:19,476 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33341:33341),(127.0.0.1/127.0.0.1:37237:37237)] 2024-12-09T16:56:19,477 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:56:19,477 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:19,488 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:56:19,493 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:56:19,571 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:56:19,642 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T16:56:19,649 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:19,653 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:56:19,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:56:19,662 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T16:56:19,663 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:19,664 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:56:19,665 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:56:19,671 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T16:56:19,671 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:19,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:56:19,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column 
family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:56:19,681 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T16:56:19,681 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:19,683 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:56:19,684 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:56:19,693 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:56:19,694 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:56:19,712 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:56:19,712 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:56:19,718 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
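Editorial note: the FlushLargeStoresPolicy line above falls back to the memstore flush size divided by the number of column families because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset: 128 MB across the four families of master:store gives the logged 32 MB, i.e. the flushSizeLowerBound=33554432 that appears a few lines later. A quick check of that arithmetic:

    // Fallback lower bound = memstore flush size / number of column families.
    public class FlushLowerBound {
      public static void main(String[] args) {
        long memstoreFlushSize = 134217728L; // 128 MB, flushSize from the MasterRegionFlusherAndCompactor line
        int columnFamilies = 4;              // info, proc, rs, state
        long lowerBound = memstoreFlushSize / columnFamilies;
        System.out.println(lowerBound);      // 33554432 (32 MB), matching flushSizeLowerBound in the log
      }
    }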
2024-12-09T16:56:19,723 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:56:19,736 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:56:19,740 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74596584, jitterRate=0.11157572269439697}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T16:56:19,749 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733763379510Initializing all the Stores at 1733763379513 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733763379514 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733763379532 (+18 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733763379532Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733763379532Cleaning up temporary data from old regions at 1733763379713 (+181 ms)Region opened successfully at 1733763379749 (+36 ms) 2024-12-09T16:56:19,755 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T16:56:19,822 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9eae00d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b4ec1980089/172.17.0.2:0 2024-12-09T16:56:19,869 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
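Editorial note: the SteppingSplitPolicy summary in the "Opened 1595e783..." line above is consistent with a 64 MB base max file size plus a random jitter (67108864 × (1 + 0.11157572269439697) ≈ 74596584), and the initialSize of 268435456 is simply twice the 128 MB flush size. The base value and the jitter formula are inferences from the logged numbers, not a statement about this test's configuration; a sketch of the check:

    // Check: desiredMaxFileSize ~= baseMaxFileSize * (1 + jitterRate), initialSize = 2 * flush size (inferred).
    public class SplitSizeJitter {
      public static void main(String[] args) {
        long baseMaxFileSize = 64L * 1024 * 1024;   // 67108864, inferred base region max file size
        double jitterRate = 0.11157572269439697;    // jitterRate from the log
        long desired = (long) (baseMaxFileSize * (1 + jitterRate));
        System.out.println(desired);                // ~74596584, the logged desiredMaxFileSize

        long flushSize = 134217728L;                // 128 MB
        System.out.println(2 * flushSize);          // 268435456, the logged initialSize
      }
    }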
2024-12-09T16:56:19,885 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T16:56:19,885 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T16:56:19,889 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T16:56:19,890 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T16:56:19,896 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-09T16:56:19,896 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T16:56:19,926 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T16:56:19,935 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T16:56:19,978 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T16:56:19,980 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T16:56:19,982 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T16:56:19,986 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T16:56:19,988 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T16:56:19,991 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T16:56:20,003 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T16:56:20,005 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T16:56:20,011 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T16:56:20,031 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T16:56:20,036 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T16:56:20,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:56:20,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:56:20,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:56:20,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:20,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:20,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:20,048 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7b4ec1980089,36647,1733763376785, sessionid=0x1000b8f605d0000, setting cluster-up flag (Was=false) 2024-12-09T16:56:20,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:20,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:20,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:20,099 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T16:56:20,104 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b4ec1980089,36647,1733763376785 2024-12-09T16:56:20,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:20,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:20,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:20,161 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T16:56:20,164 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7b4ec1980089,36647,1733763376785 2024-12-09T16:56:20,180 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T16:56:20,226 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.quotas.MasterQuotasObserver loaded, priority=536870911. 2024-12-09T16:56:20,276 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(746): ClusterId : 69b0da9d-401b-4853-8997-55479f4db3e3 2024-12-09T16:56:20,281 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(746): ClusterId : 69b0da9d-401b-4853-8997-55479f4db3e3 2024-12-09T16:56:20,289 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T16:56:20,289 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T16:56:20,295 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T16:56:20,306 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T16:56:20,314 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
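Editorial note: the StochasticLoadBalancer line above echoes its knobs (maxSteps, stepsPerRegion, maxRunningTime, runMaxSteps) straight from configuration. As an illustrative sketch only, assuming the usual hbase.master.balancer.stochastic.* property names, the same values could be set programmatically as below; nothing here implies the test actually overrides them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values mirroring the "Loaded config" line above.
        conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        System.out.println(conf.get("hbase.master.balancer.stochastic.maxSteps"));
      }
    }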
2024-12-09T16:56:20,321 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7b4ec1980089,36647,1733763376785 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T16:56:20,330 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T16:56:20,330 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T16:56:20,330 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7b4ec1980089:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:56:20,331 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7b4ec1980089:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:56:20,331 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7b4ec1980089:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:56:20,331 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T16:56:20,331 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T16:56:20,331 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7b4ec1980089:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:56:20,331 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7b4ec1980089:0, corePoolSize=10, maxPoolSize=10 2024-12-09T16:56:20,332 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,332 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7b4ec1980089:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:56:20,332 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,343 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733763410343 2024-12-09T16:56:20,345 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T16:56:20,345 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T16:56:20,346 DEBUG [RS:0;7b4ec1980089:34213 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68a0779d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b4ec1980089/172.17.0.2:0 2024-12-09T16:56:20,347 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T16:56:20,348 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T16:56:20,348 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:56:20,349 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T16:56:20,350 DEBUG [RS:1;7b4ec1980089:36153 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73f1dcd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7b4ec1980089/172.17.0.2:0 2024-12-09T16:56:20,352 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T16:56:20,352 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T16:56:20,353 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T16:56:20,353 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T16:56:20,359 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:20,360 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T16:56:20,374 DEBUG [RS:0;7b4ec1980089:34213 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7b4ec1980089:34213 2024-12-09T16:56:20,378 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T16:56:20,378 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T16:56:20,378 DEBUG [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T16:56:20,378 DEBUG [RS:1;7b4ec1980089:36153 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;7b4ec1980089:36153 2024-12-09T16:56:20,379 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T16:56:20,379 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T16:56:20,379 DEBUG [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T16:56:20,382 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(2659): reportForDuty to master=7b4ec1980089,36647,1733763376785 with port=36153, startcode=1733763378299 2024-12-09T16:56:20,383 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(2659): reportForDuty to master=7b4ec1980089,36647,1733763376785 with port=34213, startcode=1733763377986 2024-12-09T16:56:20,399 DEBUG [RS:0;7b4ec1980089:34213 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T16:56:20,400 DEBUG [RS:1;7b4ec1980089:36153 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T16:56:20,407 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T16:56:20,431 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T16:56:20,432 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T16:56:20,433 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T16:56:20,439 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T16:56:20,439 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T16:56:20,448 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7b4ec1980089:0:becomeActiveMaster-HFileCleaner.large.0-1733763380441,5,FailOnTimeoutGroup] 2024-12-09T16:56:20,454 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7b4ec1980089:0:becomeActiveMaster-HFileCleaner.small.0-1733763380448,5,FailOnTimeoutGroup] 2024-12-09T16:56:20,454 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,454 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T16:56:20,455 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,456 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
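Editorial note: the cleaner setup above (LogsCleaner / HFileCleaner chores on a 600000 ms period, plus the TimeToLive* and snapshot cleaner delegates) is driven by the master's cleaner plugin lists. The snippet below is a hedged illustration of the commonly documented property names, not a dump of this test's hbase-site.xml.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Chore period matching the "period=600000" in the log (10 minutes).
        conf.setInt("hbase.master.cleaner.interval", 600_000);
        // Cleaner delegate lists; the TimeToLive cleaners are part of the default set.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner");
        conf.set("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
        System.out.println(conf.get("hbase.master.logcleaner.plugins"));
      }
    }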
2024-12-09T16:56:20,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741831_1007 (size=1321) 2024-12-09T16:56:20,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741831_1007 (size=1321) 2024-12-09T16:56:20,472 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T16:56:20,473 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd 2024-12-09T16:56:20,486 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52825, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T16:56:20,486 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48087, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T16:56:20,494 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36647 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7b4ec1980089,36153,1733763378299 2024-12-09T16:56:20,496 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36647 {}] master.ServerManager(517): Registering regionserver=7b4ec1980089,36153,1733763378299 2024-12-09T16:56:20,507 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36647 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7b4ec1980089,34213,1733763377986 2024-12-09T16:56:20,507 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36647 {}] 
master.ServerManager(517): Registering regionserver=7b4ec1980089,34213,1733763377986 2024-12-09T16:56:20,511 DEBUG [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd 2024-12-09T16:56:20,512 DEBUG [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34305 2024-12-09T16:56:20,512 DEBUG [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T16:56:20,514 DEBUG [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd 2024-12-09T16:56:20,514 DEBUG [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34305 2024-12-09T16:56:20,514 DEBUG [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T16:56:20,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741832_1008 (size=32) 2024-12-09T16:56:20,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741832_1008 (size=32) 2024-12-09T16:56:20,519 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:20,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T16:56:20,525 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T16:56:20,525 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:20,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:56:20,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 
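Editorial note: the CompactionConfiguration lines repeated above for each column family carry one derivable number: the throttle point 2684354560 equals 2 × maxFilesToCompact (10) × the 128 MB memstore flush size, which is how the default compaction throttle is typically computed; the 1.2 ratio and 5.0 off-peak ratio are the stock defaults. The formula is an assumption based on those defaults; a quick arithmetic check:

    // Check: throttle point = 2 * maxFilesToCompact * memstore flush size (assumed default formula).
    public class CompactionThrottlePoint {
      public static void main(String[] args) {
        long memstoreFlushSize = 134217728L; // 128 MB
        int maxFilesToCompact = 10;          // from the CompactionConfiguration line
        long throttlePoint = 2L * maxFilesToCompact * memstoreFlushSize;
        System.out.println(throttlePoint);   // 2684354560, matching "throttle point" in the log
      }
    }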
2024-12-09T16:56:20,529 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T16:56:20,530 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:20,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:56:20,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T16:56:20,534 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T16:56:20,534 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:20,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:56:20,535 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T16:56:20,542 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T16:56:20,543 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:20,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T16:56:20,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:56:20,545 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T16:56:20,546 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740 2024-12-09T16:56:20,547 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740 2024-12-09T16:56:20,551 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T16:56:20,551 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T16:56:20,553 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T16:56:20,558 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T16:56:20,576 DEBUG [RS:1;7b4ec1980089:36153 {}] zookeeper.ZKUtil(111): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7b4ec1980089,36153,1733763378299 2024-12-09T16:56:20,577 WARN [RS:1;7b4ec1980089:36153 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T16:56:20,577 DEBUG [RS:0;7b4ec1980089:34213 {}] zookeeper.ZKUtil(111): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7b4ec1980089,34213,1733763377986 2024-12-09T16:56:20,577 WARN [RS:0;7b4ec1980089:34213 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T16:56:20,577 INFO [RS:1;7b4ec1980089:36153 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T16:56:20,577 INFO [RS:0;7b4ec1980089:34213 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T16:56:20,579 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:56:20,583 DEBUG [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/WALs/7b4ec1980089,36153,1733763378299 2024-12-09T16:56:20,583 DEBUG [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/WALs/7b4ec1980089,34213,1733763377986 2024-12-09T16:56:20,584 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7b4ec1980089,34213,1733763377986] 2024-12-09T16:56:20,584 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7b4ec1980089,36153,1733763378299] 2024-12-09T16:56:20,584 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65536651, jitterRate=-0.023427799344062805}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T16:56:20,588 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733763380519Initializing all the Stores at 1733763380521 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733763380521Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733763380521Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733763380522 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733763380522Cleaning up temporary data from old regions at 1733763380552 (+30 ms)Region opened successfully at 1733763380588 (+36 
ms) 2024-12-09T16:56:20,588 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T16:56:20,588 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T16:56:20,588 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T16:56:20,588 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T16:56:20,588 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T16:56:20,617 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T16:56:20,618 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733763380588Disabling compacts and flushes for region at 1733763380588Disabling writes for close at 1733763380588Writing region close event to WAL at 1733763380617 (+29 ms)Closed at 1733763380617 2024-12-09T16:56:20,621 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:56:20,621 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T16:56:20,637 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T16:56:20,639 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T16:56:20,642 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T16:56:20,654 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T16:56:20,660 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T16:56:20,669 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T16:56:20,671 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T16:56:20,675 INFO [RS:1;7b4ec1980089:36153 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T16:56:20,676 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T16:56:20,682 INFO [RS:0;7b4ec1980089:34213 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T16:56:20,682 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,684 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T16:56:20,686 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T16:56:20,691 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T16:56:20,691 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T16:56:20,693 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,694 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,694 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,694 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,694 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,694 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,694 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,694 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,694 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,694 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,694 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,694 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,695 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7b4ec1980089:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:56:20,695 DEBUG [RS:0;7b4ec1980089:34213 
{}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7b4ec1980089:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:56:20,695 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,695 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,696 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,696 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,696 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,696 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,696 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,696 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,696 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,696 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,696 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,696 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7b4ec1980089:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:56:20,696 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7b4ec1980089:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:56:20,696 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7b4ec1980089:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:56:20,696 DEBUG [RS:0;7b4ec1980089:34213 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7b4ec1980089:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:56:20,696 DEBUG [RS:1;7b4ec1980089:36153 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7b4ec1980089:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:56:20,710 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, 
unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,711 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,711 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,711 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,711 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=FileSystemUtilizationChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,711 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,712 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b4ec1980089,34213,1733763377986-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T16:56:20,715 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,716 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,717 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,717 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,717 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=FileSystemUtilizationChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,717 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,717 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b4ec1980089,36153,1733763378299-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T16:56:20,741 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T16:56:20,743 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b4ec1980089,34213,1733763377986-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,744 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,744 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.Replication(171): 7b4ec1980089,34213,1733763377986 started 2024-12-09T16:56:20,748 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T16:56:20,749 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=7b4ec1980089,36153,1733763378299-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,749 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T16:56:20,749 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.Replication(171): 7b4ec1980089,36153,1733763378299 started 2024-12-09T16:56:20,770 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,771 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1482): Serving as 7b4ec1980089,34213,1733763377986, RpcServer on 7b4ec1980089/172.17.0.2:34213, sessionid=0x1000b8f605d0001 2024-12-09T16:56:20,772 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T16:56:20,772 DEBUG [RS:0;7b4ec1980089:34213 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7b4ec1980089,34213,1733763377986 2024-12-09T16:56:20,772 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,772 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(1482): Serving as 7b4ec1980089,36153,1733763378299, RpcServer on 7b4ec1980089/172.17.0.2:36153, sessionid=0x1000b8f605d0002 2024-12-09T16:56:20,772 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b4ec1980089,34213,1733763377986' 2024-12-09T16:56:20,773 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T16:56:20,773 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T16:56:20,773 DEBUG [RS:1;7b4ec1980089:36153 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7b4ec1980089,36153,1733763378299 2024-12-09T16:56:20,773 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b4ec1980089,36153,1733763378299' 2024-12-09T16:56:20,773 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T16:56:20,774 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T16:56:20,774 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T16:56:20,775 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T16:56:20,775 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T16:56:20,776 DEBUG [RS:1;7b4ec1980089:36153 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7b4ec1980089,36153,1733763378299 2024-12-09T16:56:20,776 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b4ec1980089,36153,1733763378299' 2024-12-09T16:56:20,776 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T16:56:20,776 DEBUG [RS:1;7b4ec1980089:36153 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T16:56:20,777 DEBUG 
[RS:1;7b4ec1980089:36153 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T16:56:20,777 INFO [RS:1;7b4ec1980089:36153 {}] quotas.RegionServerRpcQuotaManager(68): Initializing RPC quota support 2024-12-09T16:56:20,779 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T16:56:20,779 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T16:56:20,779 DEBUG [RS:0;7b4ec1980089:34213 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7b4ec1980089,34213,1733763377986 2024-12-09T16:56:20,780 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7b4ec1980089,34213,1733763377986' 2024-12-09T16:56:20,780 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T16:56:20,781 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaRefresherChore, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,781 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T16:56:20,782 DEBUG [RS:1;7b4ec1980089:36153 {}] zookeeper.ZKUtil(347): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Unable to get data of znode /hbase/rpc-throttle because node does not exist (not an error) 2024-12-09T16:56:20,782 INFO [RS:1;7b4ec1980089:36153 {}] quotas.RegionServerRpcQuotaManager(74): Start rpc quota manager and rpc throttle enabled is true 2024-12-09T16:56:20,783 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=SpaceQuotaRefresherChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,783 DEBUG [RS:0;7b4ec1980089:34213 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T16:56:20,783 INFO [RS:0;7b4ec1980089:34213 {}] quotas.RegionServerRpcQuotaManager(68): Initializing RPC quota support 2024-12-09T16:56:20,784 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(168): Chore ScheduledChore name=RegionSizeReportingChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,784 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaRefresherChore, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,785 DEBUG [RS:0;7b4ec1980089:34213 {}] zookeeper.ZKUtil(347): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Unable to get data of znode /hbase/rpc-throttle because node does not exist (not an error) 2024-12-09T16:56:20,785 INFO [RS:0;7b4ec1980089:34213 {}] quotas.RegionServerRpcQuotaManager(74): Start rpc quota manager and rpc throttle enabled is true 2024-12-09T16:56:20,785 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=SpaceQuotaRefresherChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,786 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(168): Chore ScheduledChore name=RegionSizeReportingChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:20,811 WARN [7b4ec1980089:36647 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-09T16:56:20,890 INFO [RS:0;7b4ec1980089:34213 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T16:56:20,890 INFO [RS:1;7b4ec1980089:36153 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T16:56:20,893 INFO [RS:0;7b4ec1980089:34213 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b4ec1980089%2C34213%2C1733763377986, suffix=, logDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/WALs/7b4ec1980089,34213,1733763377986, archiveDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/oldWALs, maxLogs=32 2024-12-09T16:56:20,899 INFO [RS:1;7b4ec1980089:36153 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b4ec1980089%2C36153%2C1733763378299, suffix=, logDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/WALs/7b4ec1980089,36153,1733763378299, archiveDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/oldWALs, maxLogs=32 2024-12-09T16:56:20,916 DEBUG [RS:0;7b4ec1980089:34213 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/WALs/7b4ec1980089,34213,1733763377986/7b4ec1980089%2C34213%2C1733763377986.1733763380896, exclude list is [], retry=0 2024-12-09T16:56:20,920 DEBUG [RS:1;7b4ec1980089:36153 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/WALs/7b4ec1980089,36153,1733763378299/7b4ec1980089%2C36153%2C1733763378299.1733763380901, exclude list is [], retry=0 2024-12-09T16:56:20,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32855,DS-0ead2a8b-c0ef-4a72-b147-a2a8eecbb657,DISK] 2024-12-09T16:56:20,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39355,DS-e0df493e-6ab7-4aa1-86df-3f605eb019bb,DISK] 2024-12-09T16:56:20,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32855,DS-0ead2a8b-c0ef-4a72-b147-a2a8eecbb657,DISK] 2024-12-09T16:56:20,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39355,DS-e0df493e-6ab7-4aa1-86df-3f605eb019bb,DISK] 2024-12-09T16:56:20,936 INFO [RS:1;7b4ec1980089:36153 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/WALs/7b4ec1980089,36153,1733763378299/7b4ec1980089%2C36153%2C1733763378299.1733763380901 2024-12-09T16:56:20,936 INFO [RS:0;7b4ec1980089:34213 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/WALs/7b4ec1980089,34213,1733763377986/7b4ec1980089%2C34213%2C1733763377986.1733763380896 
2024-12-09T16:56:20,939 DEBUG [RS:1;7b4ec1980089:36153 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37237:37237),(127.0.0.1/127.0.0.1:33341:33341)] 2024-12-09T16:56:20,944 DEBUG [RS:0;7b4ec1980089:34213 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37237:37237),(127.0.0.1/127.0.0.1:33341:33341)] 2024-12-09T16:56:21,064 DEBUG [7b4ec1980089:36647 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=2, allServersCount=2 2024-12-09T16:56:21,073 DEBUG [7b4ec1980089:36647 {}] balancer.BalancerClusterState(204): Hosts are {7b4ec1980089=0} racks are {/default-rack=0} 2024-12-09T16:56:21,082 DEBUG [7b4ec1980089:36647 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T16:56:21,082 DEBUG [7b4ec1980089:36647 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T16:56:21,082 DEBUG [7b4ec1980089:36647 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T16:56:21,082 DEBUG [7b4ec1980089:36647 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T16:56:21,082 INFO [7b4ec1980089:36647 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T16:56:21,083 INFO [7b4ec1980089:36647 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T16:56:21,083 DEBUG [7b4ec1980089:36647 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T16:56:21,093 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7b4ec1980089,36153,1733763378299 2024-12-09T16:56:21,102 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b4ec1980089,36153,1733763378299, state=OPENING 2024-12-09T16:56:21,144 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T16:56:21,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:21,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:21,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:21,153 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:56:21,153 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:56:21,154 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:56:21,155 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, 
region=1588230740, ASSIGN 2024-12-09T16:56:21,157 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7b4ec1980089,36153,1733763378299}] 2024-12-09T16:56:21,334 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T16:56:21,336 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54303, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T16:56:21,348 INFO [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T16:56:21,348 INFO [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T16:56:21,348 INFO [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T16:56:21,352 INFO [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7b4ec1980089%2C36153%2C1733763378299.meta, suffix=.meta, logDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/WALs/7b4ec1980089,36153,1733763378299, archiveDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/oldWALs, maxLogs=32 2024-12-09T16:56:21,373 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/WALs/7b4ec1980089,36153,1733763378299/7b4ec1980089%2C36153%2C1733763378299.meta.1733763381354.meta, exclude list is [], retry=0 2024-12-09T16:56:21,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39355,DS-e0df493e-6ab7-4aa1-86df-3f605eb019bb,DISK] 2024-12-09T16:56:21,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32855,DS-0ead2a8b-c0ef-4a72-b147-a2a8eecbb657,DISK] 2024-12-09T16:56:21,382 INFO [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/WALs/7b4ec1980089,36153,1733763378299/7b4ec1980089%2C36153%2C1733763378299.meta.1733763381354.meta 2024-12-09T16:56:21,382 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33341:33341),(127.0.0.1/127.0.0.1:37237:37237)] 2024-12-09T16:56:21,383 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:56:21,385 DEBUG 
[RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T16:56:21,387 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T16:56:21,391 INFO [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T16:56:21,396 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T16:56:21,396 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:21,396 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T16:56:21,397 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T16:56:21,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T16:56:21,402 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T16:56:21,402 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:21,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:56:21,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T16:56:21,405 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T16:56:21,405 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:21,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:56:21,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T16:56:21,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T16:56:21,409 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:21,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:56:21,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T16:56:21,412 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered 
window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T16:56:21,412 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:21,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:56:21,413 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T16:56:21,415 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740 2024-12-09T16:56:21,418 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740 2024-12-09T16:56:21,423 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T16:56:21,423 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T16:56:21,424 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-09T16:56:21,426 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T16:56:21,428 INFO [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65416204, jitterRate=-0.025222599506378174}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T16:56:21,428 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T16:56:21,429 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733763381397Writing region info on filesystem at 1733763381397Initializing all the Stores at 1733763381399 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733763381399Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733763381400 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733763381400Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733763381400Cleaning up temporary data from old regions at 1733763381423 (+23 ms)Running coprocessor post-open hooks at 1733763381428 (+5 ms)Region opened successfully at 1733763381429 (+1 ms) 2024-12-09T16:56:21,437 INFO [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733763381326 2024-12-09T16:56:21,451 DEBUG [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T16:56:21,451 INFO [RS_OPEN_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T16:56:21,454 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=7b4ec1980089,36153,1733763378299 2024-12-09T16:56:21,459 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7b4ec1980089,36153,1733763378299, state=OPEN 2024-12-09T16:56:21,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:56:21,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:56:21,494 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:56:21,494 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:56:21,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:56:21,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:56:21,495 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7b4ec1980089,36153,1733763378299 2024-12-09T16:56:21,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T16:56:21,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7b4ec1980089,36153,1733763378299 in 338 msec 2024-12-09T16:56:21,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T16:56:21,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 861 msec 2024-12-09T16:56:21,514 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:56:21,514 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T16:56:21,540 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:56:21,543 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b4ec1980089,36153,1733763378299, seqNum=-1] 2024-12-09T16:56:21,569 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:56:21,572 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46873, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-09T16:56:21,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3740 sec 2024-12-09T16:56:21,613 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733763381613, completionTime=-1 2024-12-09T16:56:21,617 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=2; waited=0ms, expected min=2 server(s), max=2 server(s), master is running 2024-12-09T16:56:21,617 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T16:56:21,649 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=2 2024-12-09T16:56:21,649 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733763441649 2024-12-09T16:56:21,649 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733763501649 2024-12-09T16:56:21,649 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 32 msec 2024-12-09T16:56:21,651 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T16:56:21,668 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b4ec1980089,36647,1733763376785-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:21,668 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b4ec1980089,36647,1733763376785-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:21,669 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b4ec1980089,36647,1733763376785-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:21,671 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7b4ec1980089:36647, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:21,671 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:21,676 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:21,689 DEBUG [master/7b4ec1980089:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T16:56:21,711 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.048sec 2024-12-09T16:56:21,716 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] quotas.MasterQuotaManager(103): Quota table not found. Creating... 
2024-12-09T16:56:21,717 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.HMaster(2490): Client=null/null create 'hbase:quota', {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T16:56:21,723 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:quota 2024-12-09T16:56:21,724 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] quotas.MasterQuotaManager(107): Initializing quota support 2024-12-09T16:56:21,726 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] namespace.NamespaceStateManager(60): Namespace State Manager started. 2024-12-09T16:56:21,727 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T16:56:21,728 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:21,732 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T16:56:21,743 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] namespace.NamespaceStateManager(237): Finished updating state of 2 namespaces. 2024-12-09T16:56:21,743 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] namespace.NamespaceAuditor(50): NamespaceAuditor started. 2024-12-09T16:56:21,745 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaObserverChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:21,745 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaObserverChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:21,747 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T16:56:21,748 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T16:56:21,748 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-09T16:56:21,749 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T16:56:21,749 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b4ec1980089,36647,1733763376785-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T16:56:21,750 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b4ec1980089,36647,1733763376785-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T16:56:21,778 DEBUG [master/7b4ec1980089:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T16:56:21,779 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T16:56:21,780 INFO [master/7b4ec1980089:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7b4ec1980089,36647,1733763376785-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:56:21,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741836_1012 (size=624) 2024-12-09T16:56:21,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741836_1012 (size=624) 2024-12-09T16:56:21,805 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c8af654, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:56:21,805 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 59396aaaa39ec96658e63a448b643a55, NAME => 'hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:quota', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd 2024-12-09T16:56:21,809 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T16:56:21,809 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T16:56:21,846 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7b4ec1980089,36647,-1 for getting cluster id 2024-12-09T16:56:21,888 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T16:56:21,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39355 is added to blk_1073741837_1013 (size=38) 2024-12-09T16:56:21,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741837_1013 (size=38) 2024-12-09T16:56:21,896 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:21,896 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1722): Closing 59396aaaa39ec96658e63a448b643a55, disabling compactions & flushes 2024-12-09T16:56:21,896 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 2024-12-09T16:56:21,896 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 2024-12-09T16:56:21,896 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. after waiting 0 ms 2024-12-09T16:56:21,896 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 2024-12-09T16:56:21,896 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1973): Closed hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 2024-12-09T16:56:21,896 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1676): Region close journal for 59396aaaa39ec96658e63a448b643a55: Waiting for close lock at 1733763381896Disabling compacts and flushes for region at 1733763381896Disabling writes for close at 1733763381896Writing region close event to WAL at 1733763381896Closed at 1733763381896 2024-12-09T16:56:21,900 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T16:56:21,908 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55.","families":{"info":[{"qualifier":"regioninfo","vlen":37,"tag":[],"timestamp":"1733763381901"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733763381901"}]},"ts":"1733763381901"} 2024-12-09T16:56:21,920 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T16:56:21,935 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T16:56:21,936 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '69b0da9d-401b-4853-8997-55479f4db3e3' 2024-12-09T16:56:21,937 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:quota","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763381935"}]},"ts":"1733763381935"} 2024-12-09T16:56:21,938 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T16:56:21,938 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "69b0da9d-401b-4853-8997-55479f4db3e3" 2024-12-09T16:56:21,938 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5144131e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:56:21,939 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7b4ec1980089,36647,-1] 2024-12-09T16:56:21,942 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T16:56:21,942 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:quota, state=ENABLING in hbase:meta 2024-12-09T16:56:21,943 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {7b4ec1980089=0} racks are {/default-rack=0} 2024-12-09T16:56:21,944 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:56:21,947 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T16:56:21,947 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T16:56:21,947 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T16:56:21,948 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T16:56:21,948 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T16:56:21,948 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T16:56:21,948 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T16:56:21,949 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41352, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T16:56:21,951 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=59396aaaa39ec96658e63a448b643a55, ASSIGN}] 2024-12-09T16:56:21,954 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure 
table=hbase:quota, region=59396aaaa39ec96658e63a448b643a55, ASSIGN 2024-12-09T16:56:21,956 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@209322c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:56:21,956 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:56:21,959 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:quota, region=59396aaaa39ec96658e63a448b643a55, ASSIGN; state=OFFLINE, location=7b4ec1980089,34213,1733763377986; forceNewPlan=false, retain=false 2024-12-09T16:56:21,965 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b4ec1980089,36153,1733763378299, seqNum=-1] 2024-12-09T16:56:21,965 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:56:21,978 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37562, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T16:56:22,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7b4ec1980089,36647,1733763376785 2024-12-09T16:56:22,005 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:22,113 INFO [7b4ec1980089:36647 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T16:56:22,113 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=59396aaaa39ec96658e63a448b643a55, regionState=OPENING, regionLocation=7b4ec1980089,34213,1733763377986 2024-12-09T16:56:22,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:quota, region=59396aaaa39ec96658e63a448b643a55, ASSIGN because future has completed 2024-12-09T16:56:22,122 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 59396aaaa39ec96658e63a448b643a55, server=7b4ec1980089,34213,1733763377986}] 2024-12-09T16:56:22,280 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T16:56:22,282 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45083, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T16:56:22,291 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 
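The "Minicluster is up; activeMaster=..." entry above comes from the HBaseTestingUtil harness driving this run. A rough, standalone sketch of that harness lifecycle is below; it is an assumption that the no-argument startMiniCluster()/shutdownMiniCluster() calls shown here match how this particular test configures its master and two region servers.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Admin;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();   // embedded ZooKeeper, HDFS, a master, and region server(s)
        try {
          Admin admin = util.getAdmin();
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }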
2024-12-09T16:56:22,291 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 59396aaaa39ec96658e63a448b643a55, NAME => 'hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55.', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:56:22,292 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table quota 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:22,292 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:22,292 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:22,293 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:22,295 INFO [StoreOpener-59396aaaa39ec96658e63a448b643a55-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family q of region 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:22,300 INFO [StoreOpener-59396aaaa39ec96658e63a448b643a55-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59396aaaa39ec96658e63a448b643a55 columnFamilyName q 2024-12-09T16:56:22,300 DEBUG [StoreOpener-59396aaaa39ec96658e63a448b643a55-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:22,302 INFO [StoreOpener-59396aaaa39ec96658e63a448b643a55-1 {}] regionserver.HStore(327): Store=59396aaaa39ec96658e63a448b643a55/q, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:56:22,302 INFO [StoreOpener-59396aaaa39ec96658e63a448b643a55-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family u of region 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:22,307 INFO [StoreOpener-59396aaaa39ec96658e63a448b643a55-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59396aaaa39ec96658e63a448b643a55 columnFamilyName u 2024-12-09T16:56:22,308 DEBUG [StoreOpener-59396aaaa39ec96658e63a448b643a55-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:22,309 INFO [StoreOpener-59396aaaa39ec96658e63a448b643a55-1 {}] regionserver.HStore(327): Store=59396aaaa39ec96658e63a448b643a55/u, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:56:22,309 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:22,311 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:22,313 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:22,314 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:22,315 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:22,316 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:quota descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
2024-12-09T16:56:22,320 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:22,328 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:56:22,330 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 59396aaaa39ec96658e63a448b643a55; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60437069, jitterRate=-0.09941749274730682}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-09T16:56:22,330 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:22,332 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 59396aaaa39ec96658e63a448b643a55: Running coprocessor pre-open hook at 1733763382293Writing region info on filesystem at 1733763382293Initializing all the Stores at 1733763382294 (+1 ms)Instantiating store for column family {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733763382294Instantiating store for column family {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733763382295 (+1 ms)Cleaning up temporary data from old regions at 1733763382315 (+20 ms)Running coprocessor post-open hooks at 1733763382330 (+15 ms)Region opened successfully at 1733763382332 (+2 ms) 2024-12-09T16:56:22,335 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55., pid=6, masterSystemTime=1733763382280 2024-12-09T16:56:22,339 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 2024-12-09T16:56:22,340 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 
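With the hbase:quota region now opened on a region server (the "Opened 59396aaaa39ec96658e63a448b643a55" entry above), stored quota definitions become readable through the client API. The following is a hedged sketch of listing them with Admin.getQuota; the table-name regex in the filter is an illustrative assumption.

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.quotas.QuotaFilter;
    import org.apache.hadoop.hbase.quotas.QuotaSettings;

    public class ListQuotasSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // An empty QuotaFilter would return every stored quota; the regex here is illustrative.
          List<QuotaSettings> quotas =
              admin.getQuota(new QuotaFilter().setTableFilter("TestQuotaAdmin.*"));
          for (QuotaSettings q : quotas) {
            System.out.println(q);
          }
        }
      }
    }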
2024-12-09T16:56:22,343 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=59396aaaa39ec96658e63a448b643a55, regionState=OPEN, openSeqNum=2, regionLocation=7b4ec1980089,34213,1733763377986 2024-12-09T16:56:22,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 59396aaaa39ec96658e63a448b643a55, server=7b4ec1980089,34213,1733763377986 because future has completed 2024-12-09T16:56:22,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T16:56:22,357 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 59396aaaa39ec96658e63a448b643a55, server=7b4ec1980089,34213,1733763377986 in 228 msec 2024-12-09T16:56:22,361 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T16:56:22,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=59396aaaa39ec96658e63a448b643a55, ASSIGN in 405 msec 2024-12-09T16:56:22,363 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T16:56:22,364 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:quota","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763382363"}]},"ts":"1733763382363"} 2024-12-09T16:56:22,367 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:quota, state=ENABLED in hbase:meta 2024-12-09T16:56:22,369 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T16:56:22,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:quota in 652 msec 2024-12-09T16:56:22,426 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=hbase:quota,, stopping at row=hbase:quota ,, for max=2147483647 with caching=100 2024-12-09T16:56:22,437 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T16:56:22,450 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 7b4ec1980089,36647,1733763376785 2024-12-09T16:56:22,452 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@24aa7bf2 2024-12-09T16:56:22,457 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T16:56:22,461 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41354, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T16:56:22,466 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=hbase:quota,, stopping at row=hbase:quota ,, for 
max=2147483647 with caching=100 2024-12-09T16:56:22,476 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:56:22,492 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38640, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T16:56:22,510 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin0', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T16:56:22,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin0 2024-12-09T16:56:22,519 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T16:56:22,521 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:22,523 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T16:56:22,525 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin0" procId is: 7 2024-12-09T16:56:22,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T16:56:22,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741838_1014 (size=391) 2024-12-09T16:56:22,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741838_1014 (size=391) 2024-12-09T16:56:22,563 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => aef056b9db773e1b5b352ca79819f223, NAME => 'TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin0', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd 2024-12-09T16:56:22,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:32855 is added to blk_1073741839_1015 (size=50) 2024-12-09T16:56:22,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741839_1015 (size=50) 2024-12-09T16:56:22,601 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:22,601 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1722): Closing aef056b9db773e1b5b352ca79819f223, disabling compactions & flushes 2024-12-09T16:56:22,601 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. 2024-12-09T16:56:22,601 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. 2024-12-09T16:56:22,601 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. after waiting 0 ms 2024-12-09T16:56:22,601 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. 2024-12-09T16:56:22,601 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. 2024-12-09T16:56:22,601 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1676): Region close journal for aef056b9db773e1b5b352ca79819f223: Waiting for close lock at 1733763382601Disabling compacts and flushes for region at 1733763382601Disabling writes for close at 1733763382601Writing region close event to WAL at 1733763382601Closed at 1733763382601 2024-12-09T16:56:22,606 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T16:56:22,606 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1733763382606"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733763382606"}]},"ts":"1733763382606"} 2024-12-09T16:56:22,611 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
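The entries above trace the client's create 'TestQuotaAdmin0' request (pid=7) being written out to the filesystem and to hbase:meta. An equivalent table descriptor can be built programmatically; the sketch below is an assumed reconstruction from the descriptor printed in the log, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestQuotaAdmin0"))
              .setRegionReplication(1)                        // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                          // VERSIONS => '1'
                  .setBlocksize(64 * 1024)                    // BLOCKSIZE => '65536 B (64KB)'
                  .build())
              .build();
          admin.createTable(desc);                            // blocks until the procedure finishes
        }
      }
    }

The master turns such a request into a CreateTableProcedure like the pid=7 one traced here; createTableAsync is the non-blocking variant that returns a Future instead.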
2024-12-09T16:56:22,617 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T16:56:22,617 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763382617"}]},"ts":"1733763382617"} 2024-12-09T16:56:22,623 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=ENABLING in hbase:meta 2024-12-09T16:56:22,623 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {7b4ec1980089=0} racks are {/default-rack=0} 2024-12-09T16:56:22,625 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T16:56:22,625 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T16:56:22,625 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T16:56:22,625 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T16:56:22,625 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T16:56:22,625 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T16:56:22,625 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T16:56:22,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=aef056b9db773e1b5b352ca79819f223, ASSIGN}] 2024-12-09T16:56:22,630 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=aef056b9db773e1b5b352ca79819f223, ASSIGN 2024-12-09T16:56:22,633 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=aef056b9db773e1b5b352ca79819f223, ASSIGN; state=OFFLINE, location=7b4ec1980089,34213,1733763377986; forceNewPlan=false, retain=false 2024-12-09T16:56:22,785 INFO [7b4ec1980089:36647 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-09T16:56:22,785 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=aef056b9db773e1b5b352ca79819f223, regionState=OPENING, regionLocation=7b4ec1980089,34213,1733763377986 2024-12-09T16:56:22,789 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=aef056b9db773e1b5b352ca79819f223, ASSIGN because future has completed 2024-12-09T16:56:22,791 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure aef056b9db773e1b5b352ca79819f223, server=7b4ec1980089,34213,1733763377986}] 2024-12-09T16:56:22,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T16:56:22,955 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. 2024-12-09T16:56:22,955 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => aef056b9db773e1b5b352ca79819f223, NAME => 'TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223.', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:56:22,955 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin0 aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:22,956 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:22,956 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:22,956 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:22,959 INFO [StoreOpener-aef056b9db773e1b5b352ca79819f223-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:22,962 INFO [StoreOpener-aef056b9db773e1b5b352ca79819f223-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aef056b9db773e1b5b352ca79819f223 columnFamilyName cf 2024-12-09T16:56:22,962 DEBUG [StoreOpener-aef056b9db773e1b5b352ca79819f223-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:22,963 INFO [StoreOpener-aef056b9db773e1b5b352ca79819f223-1 {}] regionserver.HStore(327): Store=aef056b9db773e1b5b352ca79819f223/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:56:22,965 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:22,968 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:22,970 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:22,971 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:22,971 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:22,974 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:22,980 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:56:22,982 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened aef056b9db773e1b5b352ca79819f223; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72823253, jitterRate=0.08515103161334991}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T16:56:22,982 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:22,984 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for aef056b9db773e1b5b352ca79819f223: Running coprocessor pre-open hook at 1733763382956Writing region info on filesystem at 1733763382956Initializing all the Stores at 1733763382958 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733763382958Cleaning up temporary data from old regions at 1733763382971 (+13 ms)Running coprocessor post-open hooks at 1733763382982 (+11 ms)Region opened successfully at 1733763382984 (+2 ms) 2024-12-09T16:56:22,986 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., pid=9, masterSystemTime=1733763382946 2024-12-09T16:56:22,992 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=aef056b9db773e1b5b352ca79819f223, regionState=OPEN, openSeqNum=2, regionLocation=7b4ec1980089,34213,1733763377986 2024-12-09T16:56:22,994 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. 2024-12-09T16:56:22,994 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. 2024-12-09T16:56:22,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure aef056b9db773e1b5b352ca79819f223, server=7b4ec1980089,34213,1733763377986 because future has completed 2024-12-09T16:56:23,011 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-09T16:56:23,011 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure aef056b9db773e1b5b352ca79819f223, server=7b4ec1980089,34213,1733763377986 in 211 msec 2024-12-09T16:56:23,032 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T16:56:23,032 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=aef056b9db773e1b5b352ca79819f223, ASSIGN in 386 msec 2024-12-09T16:56:23,034 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T16:56:23,034 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763383034"}]},"ts":"1733763383034"} 2024-12-09T16:56:23,040 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=ENABLED in hbase:meta 2024-12-09T16:56:23,042 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T16:56:23,049 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin0 in 530 msec 2024-12-09T16:56:23,303 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T16:56:23,305 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin0 get assigned. Timeout = 60000ms 2024-12-09T16:56:23,305 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin0 completed 2024-12-09T16:56:23,306 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:23,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin0 assigned to meta. Checking AM states. 2024-12-09T16:56:23,313 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:23,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin0 assigned. 2024-12-09T16:56:23,314 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:23,316 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin0,, stopping at row=TestQuotaAdmin0 ,, for max=2147483647 with caching=100 2024-12-09T16:56:23,322 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin0,, stopping at row=TestQuotaAdmin0 ,, for max=2147483647 with caching=100 2024-12-09T16:56:23,357 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin1', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T16:56:23,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin1 2024-12-09T16:56:23,360 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T16:56:23,361 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:23,361 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin1" procId is: 10 2024-12-09T16:56:23,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-12-09T16:56:23,362 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T16:56:23,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741840_1016 (size=391) 2024-12-09T16:56:23,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741840_1016 (size=391) 2024-12-09T16:56:23,383 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 90b87e721837483af81762da8fa8eee2, NAME => 'TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin1', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd 2024-12-09T16:56:23,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741841_1017 (size=50) 2024-12-09T16:56:23,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741841_1017 (size=50) 2024-12-09T16:56:23,400 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:23,400 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1722): Closing 90b87e721837483af81762da8fa8eee2, disabling compactions & flushes 2024-12-09T16:56:23,400 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. 2024-12-09T16:56:23,400 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. 2024-12-09T16:56:23,400 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. after waiting 0 ms 2024-12-09T16:56:23,400 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. 2024-12-09T16:56:23,400 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. 
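Interleaved above, the harness also waits for the new table's regions to come online ("Waiting until all regions of table TestQuotaAdmin0 get assigned. Timeout = 60000ms"). A small, assumed helper showing the corresponding HBaseTestingUtil call is below; it presumes waitUntilAllRegionsAssigned carried over unchanged from the older HBaseTestingUtility.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    public class WaitForAssignmentSketch {
      // Rough equivalent of the "Waiting until all regions of table ... get assigned" step above.
      static void waitFor(HBaseTestingUtil util, String table) throws Exception {
        util.waitUntilAllRegionsAssigned(TableName.valueOf(table)); // the log shows a 60000 ms timeout
      }
    }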
2024-12-09T16:56:23,400 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 90b87e721837483af81762da8fa8eee2: Waiting for close lock at 1733763383400Disabling compacts and flushes for region at 1733763383400Disabling writes for close at 1733763383400Writing region close event to WAL at 1733763383400Closed at 1733763383400 2024-12-09T16:56:23,402 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T16:56:23,402 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1733763383402"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733763383402"}]},"ts":"1733763383402"} 2024-12-09T16:56:23,407 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T16:56:23,409 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T16:56:23,410 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763383409"}]},"ts":"1733763383409"} 2024-12-09T16:56:23,413 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=ENABLING in hbase:meta 2024-12-09T16:56:23,413 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {7b4ec1980089=0} racks are {/default-rack=0} 2024-12-09T16:56:23,416 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T16:56:23,416 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T16:56:23,416 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T16:56:23,416 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T16:56:23,416 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T16:56:23,416 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T16:56:23,416 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T16:56:23,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=90b87e721837483af81762da8fa8eee2, ASSIGN}] 2024-12-09T16:56:23,419 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=90b87e721837483af81762da8fa8eee2, ASSIGN 2024-12-09T16:56:23,421 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=90b87e721837483af81762da8fa8eee2, ASSIGN; state=OFFLINE, location=7b4ec1980089,36153,1733763378299; forceNewPlan=false, retain=false 
2024-12-09T16:56:23,571 INFO [7b4ec1980089:36647 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T16:56:23,572 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=90b87e721837483af81762da8fa8eee2, regionState=OPENING, regionLocation=7b4ec1980089,36153,1733763378299 2024-12-09T16:56:23,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=90b87e721837483af81762da8fa8eee2, ASSIGN because future has completed 2024-12-09T16:56:23,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 90b87e721837483af81762da8fa8eee2, server=7b4ec1980089,36153,1733763378299}] 2024-12-09T16:56:23,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-12-09T16:56:23,765 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. 2024-12-09T16:56:23,766 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 90b87e721837483af81762da8fa8eee2, NAME => 'TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2.', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:56:23,766 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin1 90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:23,766 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:23,766 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:23,766 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:23,769 INFO [StoreOpener-90b87e721837483af81762da8fa8eee2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:23,777 INFO [StoreOpener-90b87e721837483af81762da8fa8eee2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 90b87e721837483af81762da8fa8eee2 columnFamilyName cf 2024-12-09T16:56:23,777 DEBUG [StoreOpener-90b87e721837483af81762da8fa8eee2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:23,778 INFO [StoreOpener-90b87e721837483af81762da8fa8eee2-1 {}] regionserver.HStore(327): Store=90b87e721837483af81762da8fa8eee2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:56:23,779 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:23,780 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin1/90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:23,782 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin1/90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:23,784 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:23,784 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:23,791 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:23,804 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin1/90b87e721837483af81762da8fa8eee2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:56:23,805 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 90b87e721837483af81762da8fa8eee2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74408769, jitterRate=0.10877706110477448}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T16:56:23,806 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:23,807 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 90b87e721837483af81762da8fa8eee2: Running coprocessor pre-open hook at 1733763383767Writing region info on filesystem at 
1733763383767Initializing all the Stores at 1733763383769 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733763383769Cleaning up temporary data from old regions at 1733763383785 (+16 ms)Running coprocessor post-open hooks at 1733763383806 (+21 ms)Region opened successfully at 1733763383807 (+1 ms) 2024-12-09T16:56:23,809 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2., pid=12, masterSystemTime=1733763383757 2024-12-09T16:56:23,815 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. 2024-12-09T16:56:23,815 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. 2024-12-09T16:56:23,816 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=90b87e721837483af81762da8fa8eee2, regionState=OPEN, openSeqNum=2, regionLocation=7b4ec1980089,36153,1733763378299 2024-12-09T16:56:23,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 90b87e721837483af81762da8fa8eee2, server=7b4ec1980089,36153,1733763378299 because future has completed 2024-12-09T16:56:23,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-09T16:56:23,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 90b87e721837483af81762da8fa8eee2, server=7b4ec1980089,36153,1733763378299 in 228 msec 2024-12-09T16:56:23,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-12-09T16:56:23,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=90b87e721837483af81762da8fa8eee2, ASSIGN in 418 msec 2024-12-09T16:56:23,843 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T16:56:23,844 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763383844"}]},"ts":"1733763383844"} 2024-12-09T16:56:23,848 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=ENABLED in hbase:meta 2024-12-09T16:56:23,850 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T16:56:23,858 INFO [PEWorker-3 
{}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin1 in 494 msec 2024-12-09T16:56:24,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-12-09T16:56:24,133 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin1 completed 2024-12-09T16:56:24,133 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin1 get assigned. Timeout = 60000ms 2024-12-09T16:56:24,134 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:24,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin1 assigned to meta. Checking AM states. 2024-12-09T16:56:24,139 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:24,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin1 assigned. 2024-12-09T16:56:24,139 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:24,142 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin1,, stopping at row=TestQuotaAdmin1 ,, for max=2147483647 with caching=100 2024-12-09T16:56:24,148 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin1,, stopping at row=TestQuotaAdmin1 ,, for max=2147483647 with caching=100 2024-12-09T16:56:24,156 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin2', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T16:56:24,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin2 2024-12-09T16:56:24,160 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T16:56:24,160 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:24,161 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin2" procId is: 13 2024-12-09T16:56:24,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-09T16:56:24,162 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; 
CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T16:56:24,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741842_1018 (size=391) 2024-12-09T16:56:24,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741842_1018 (size=391) 2024-12-09T16:56:24,174 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cd004b0305056e9cbc7135ba0180b1d6, NAME => 'TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin2', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd 2024-12-09T16:56:24,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741843_1019 (size=50) 2024-12-09T16:56:24,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741843_1019 (size=50) 2024-12-09T16:56:24,189 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:24,189 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1722): Closing cd004b0305056e9cbc7135ba0180b1d6, disabling compactions & flushes 2024-12-09T16:56:24,189 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. 2024-12-09T16:56:24,189 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. 2024-12-09T16:56:24,189 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. after waiting 0 ms 2024-12-09T16:56:24,190 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. 2024-12-09T16:56:24,190 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. 
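The CreateTableProcedure entries above (CREATE_TABLE_PRE_OPERATION through CREATE_TABLE_WRITE_FS_LAYOUT for TestQuotaAdmin2) are the master-side half of a client Admin.createTable request. Below is a minimal client-side sketch that builds a comparable descriptor, one 'cf' family with VERSIONS=1 and BLOOMFILTER NONE as logged; the Connection parameter, the helper class and method names are illustrative assumptions, not code taken from this test run.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateQuotaTableSketch {
      // Sketch only: mirrors the column-family attributes recorded in the log
      // (NAME => 'cf', VERSIONS => '1', BLOOMFILTER => 'NONE'); everything else uses defaults.
      static void createQuotaTable(Connection conn, String tableName) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(
              TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
                  .setColumnFamily(
                      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                          .setMaxVersions(1)
                          .setBloomFilterType(BloomType.NONE)
                          .build())
                  .build());
          // The master then runs a CreateTableProcedure (like pid=13 above) and the call
          // returns once that procedure reports SUCCESS.
        }
      }
    }

Invoked as createQuotaTable(conn, "TestQuotaAdmin2"), a call of this shape would drive the same WRITE_FS_LAYOUT / ADD_TO_META / ASSIGN_REGIONS state sequence visible in the surrounding entries.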
2024-12-09T16:56:24,190 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1676): Region close journal for cd004b0305056e9cbc7135ba0180b1d6: Waiting for close lock at 1733763384189Disabling compacts and flushes for region at 1733763384189Disabling writes for close at 1733763384189Writing region close event to WAL at 1733763384190 (+1 ms)Closed at 1733763384190 2024-12-09T16:56:24,192 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T16:56:24,193 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1733763384192"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733763384192"}]},"ts":"1733763384192"} 2024-12-09T16:56:24,202 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T16:56:24,203 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T16:56:24,204 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763384204"}]},"ts":"1733763384204"} 2024-12-09T16:56:24,206 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=ENABLING in hbase:meta 2024-12-09T16:56:24,207 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {7b4ec1980089=0} racks are {/default-rack=0} 2024-12-09T16:56:24,209 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T16:56:24,209 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T16:56:24,209 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T16:56:24,209 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T16:56:24,209 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T16:56:24,209 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T16:56:24,209 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T16:56:24,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=cd004b0305056e9cbc7135ba0180b1d6, ASSIGN}] 2024-12-09T16:56:24,212 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=cd004b0305056e9cbc7135ba0180b1d6, ASSIGN 2024-12-09T16:56:24,214 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=cd004b0305056e9cbc7135ba0180b1d6, ASSIGN; state=OFFLINE, location=7b4ec1980089,34213,1733763377986; forceNewPlan=false, retain=false 
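Once the TransitRegionStateProcedure initialized above (pid=14, TestQuotaAdmin2 ASSIGN) completes, the resulting placement can be read back from the client through RegionLocator. A small sketch, assuming an already-open Connection named conn; the class and method names are illustrative only.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class PrintRegionPlacementSketch {
      // Lists each region of the table together with the region server hosting it,
      // i.e. the same information the "updating hbase:meta ... regionLocation=..." entries record.
      static void printPlacement(Connection conn, String table) throws IOException {
        try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf(table))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }

For TestQuotaAdmin2 this would report cd004b0305056e9cbc7135ba0180b1d6 hosted on 7b4ec1980089,34213,1733763377986 once the OPEN update below is written to hbase:meta.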
2024-12-09T16:56:24,365 INFO [7b4ec1980089:36647 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T16:56:24,365 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=14 updating hbase:meta row=cd004b0305056e9cbc7135ba0180b1d6, regionState=OPENING, regionLocation=7b4ec1980089,34213,1733763377986 2024-12-09T16:56:24,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=cd004b0305056e9cbc7135ba0180b1d6, ASSIGN because future has completed 2024-12-09T16:56:24,371 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd004b0305056e9cbc7135ba0180b1d6, server=7b4ec1980089,34213,1733763377986}] 2024-12-09T16:56:24,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-09T16:56:24,531 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(132): Open TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. 2024-12-09T16:56:24,531 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7752): Opening region: {ENCODED => cd004b0305056e9cbc7135ba0180b1d6, NAME => 'TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6.', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:56:24,532 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin2 cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:24,532 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(898): Instantiated TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:24,532 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7794): checking encryption for cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:24,532 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7797): checking classloading for cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:24,536 INFO [StoreOpener-cd004b0305056e9cbc7135ba0180b1d6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:24,538 INFO [StoreOpener-cd004b0305056e9cbc7135ba0180b1d6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd004b0305056e9cbc7135ba0180b1d6 columnFamilyName cf 2024-12-09T16:56:24,538 DEBUG [StoreOpener-cd004b0305056e9cbc7135ba0180b1d6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:24,539 INFO [StoreOpener-cd004b0305056e9cbc7135ba0180b1d6-1 {}] regionserver.HStore(327): Store=cd004b0305056e9cbc7135ba0180b1d6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:56:24,539 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1038): replaying wal for cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:24,540 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin2/cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:24,541 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin2/cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:24,542 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1048): stopping wal replay for cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:24,542 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1060): Cleaning up temporary data for cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:24,547 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1093): writing seq id for cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:24,559 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin2/cd004b0305056e9cbc7135ba0180b1d6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:56:24,560 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1114): Opened cd004b0305056e9cbc7135ba0180b1d6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61745360, jitterRate=-0.07992243766784668}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T16:56:24,560 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:24,561 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1006): Region open journal for cd004b0305056e9cbc7135ba0180b1d6: Running coprocessor pre-open hook at 1733763384532Writing region info on filesystem at 
1733763384532Initializing all the Stores at 1733763384535 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733763384535Cleaning up temporary data from old regions at 1733763384542 (+7 ms)Running coprocessor post-open hooks at 1733763384561 (+19 ms)Region opened successfully at 1733763384561 2024-12-09T16:56:24,563 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6., pid=15, masterSystemTime=1733763384525 2024-12-09T16:56:24,567 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=14 updating hbase:meta row=cd004b0305056e9cbc7135ba0180b1d6, regionState=OPEN, openSeqNum=2, regionLocation=7b4ec1980089,34213,1733763377986 2024-12-09T16:56:24,568 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. 2024-12-09T16:56:24,568 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. 2024-12-09T16:56:24,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=15, ppid=14, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd004b0305056e9cbc7135ba0180b1d6, server=7b4ec1980089,34213,1733763377986 because future has completed 2024-12-09T16:56:24,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-12-09T16:56:24,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; OpenRegionProcedure cd004b0305056e9cbc7135ba0180b1d6, server=7b4ec1980089,34213,1733763377986 in 204 msec 2024-12-09T16:56:24,592 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-09T16:56:24,592 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=cd004b0305056e9cbc7135ba0180b1d6, ASSIGN in 371 msec 2024-12-09T16:56:24,595 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T16:56:24,596 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763384596"}]},"ts":"1733763384596"} 2024-12-09T16:56:24,599 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=ENABLED in hbase:meta 2024-12-09T16:56:24,603 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T16:56:24,607 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin2 in 447 msec 2024-12-09T16:56:24,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-09T16:56:24,934 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin2 completed 2024-12-09T16:56:24,934 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin2 get assigned. Timeout = 60000ms 2024-12-09T16:56:24,934 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:24,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin2 assigned to meta. Checking AM states. 2024-12-09T16:56:24,941 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:24,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin2 assigned. 2024-12-09T16:56:24,942 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:24,945 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin2,, stopping at row=TestQuotaAdmin2 ,, for max=2147483647 with caching=100 2024-12-09T16:56:24,952 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin2,, stopping at row=TestQuotaAdmin2 ,, for max=2147483647 with caching=100 2024-12-09T16:56:24,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$18(3529): Client=jenkins//172.17.0.2 creating {NAME => 'TestNs'} 2024-12-09T16:56:24,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=16, state=RUNNABLE:CREATE_NAMESPACE_PREPARE, hasLock=false; CreateNamespaceProcedure, namespace=TestNs 2024-12-09T16:56:24,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=16 2024-12-09T16:56:24,985 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, state=SUCCESS, hasLock=false; CreateNamespaceProcedure, namespace=TestNs in 19 msec 2024-12-09T16:56:25,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=16 2024-12-09T16:56:25,234 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$NamespaceProcedureBiConsumer(2745): Operation: CREATE_NAMESPACE, Namespace: TestNs completed 2024-12-09T16:56:25,235 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestNs:TestTable', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T16:56:25,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=17, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; 
CreateTableProcedure table=TestNs:TestTable 2024-12-09T16:56:25,239 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T16:56:25,240 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:25,240 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "TestNs" qualifier: "TestTable" procId is: 17 2024-12-09T16:56:25,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-12-09T16:56:25,242 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T16:56:25,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741844_1020 (size=358) 2024-12-09T16:56:25,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741844_1020 (size=358) 2024-12-09T16:56:25,280 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c9cfeb687a0dd64ffa1b3e9c8c9d924c, NAME => 'TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='TestNs:TestTable', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd 2024-12-09T16:56:25,280 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 434d27bd1b99346c64f656cd110c7695, NAME => 'TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='TestNs:TestTable', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd 2024-12-09T16:56:25,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741845_1021 (size=44) 2024-12-09T16:56:25,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741845_1021 (size=44) 2024-12-09T16:56:25,321 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:25,322 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1722): Closing c9cfeb687a0dd64ffa1b3e9c8c9d924c, disabling compactions & flushes 2024-12-09T16:56:25,322 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1755): Closing region TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. 2024-12-09T16:56:25,322 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. 2024-12-09T16:56:25,322 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. after waiting 0 ms 2024-12-09T16:56:25,322 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. 2024-12-09T16:56:25,322 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1973): Closed TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. 2024-12-09T16:56:25,322 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1676): Region close journal for c9cfeb687a0dd64ffa1b3e9c8c9d924c: Waiting for close lock at 1733763385322Disabling compacts and flushes for region at 1733763385322Disabling writes for close at 1733763385322Writing region close event to WAL at 1733763385322Closed at 1733763385322 2024-12-09T16:56:25,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741846_1022 (size=44) 2024-12-09T16:56:25,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741846_1022 (size=44) 2024-12-09T16:56:25,342 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(898): Instantiated TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:25,343 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1722): Closing 434d27bd1b99346c64f656cd110c7695, disabling compactions & flushes 2024-12-09T16:56:25,343 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1755): Closing region TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. 2024-12-09T16:56:25,343 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. 2024-12-09T16:56:25,343 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. after waiting 0 ms 2024-12-09T16:56:25,343 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. 2024-12-09T16:56:25,343 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1973): Closed TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. 
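TestNs:TestTable is laid out above as two regions split at '1' (STARTKEY '' to '1', and '1' to ''), right after the CreateNamespaceProcedure for TestNs. A comparable client-side sketch follows; the namespace, table name, column family and split key are taken from the log, while the Connection variable and the helper class are assumptions.

    import java.io.IOException;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTestNsTableSketch {
      static void createNamespaceAndTable(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          admin.createNamespace(NamespaceDescriptor.create("TestNs").build());
          // A single split key '1' yields the two regions seen above: [''..'1') and ['1'..'').
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestNs:TestTable"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                  .build(),
              splitKeys);
        }
      }
    }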
2024-12-09T16:56:25,343 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1676): Region close journal for 434d27bd1b99346c64f656cd110c7695: Waiting for close lock at 1733763385342Disabling compacts and flushes for region at 1733763385343 (+1 ms)Disabling writes for close at 1733763385343Writing region close event to WAL at 1733763385343Closed at 1733763385343 2024-12-09T16:56:25,345 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T16:56:25,346 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1733763385346"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733763385346"}]},"ts":"1733763385346"} 2024-12-09T16:56:25,346 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1733763385346"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733763385346"}]},"ts":"1733763385346"} 2024-12-09T16:56:25,393 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-09T16:56:25,396 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T16:56:25,396 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763385396"}]},"ts":"1733763385396"} 2024-12-09T16:56:25,404 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=ENABLING in hbase:meta 2024-12-09T16:56:25,404 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {7b4ec1980089=0} racks are {/default-rack=0} 2024-12-09T16:56:25,410 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T16:56:25,410 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T16:56:25,410 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T16:56:25,410 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T16:56:25,410 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T16:56:25,410 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T16:56:25,410 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T16:56:25,411 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=c9cfeb687a0dd64ffa1b3e9c8c9d924c, ASSIGN}, {pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=434d27bd1b99346c64f656cd110c7695, ASSIGN}] 2024-12-09T16:56:25,413 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, 
ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=434d27bd1b99346c64f656cd110c7695, ASSIGN 2024-12-09T16:56:25,414 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=c9cfeb687a0dd64ffa1b3e9c8c9d924c, ASSIGN 2024-12-09T16:56:25,418 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=434d27bd1b99346c64f656cd110c7695, ASSIGN; state=OFFLINE, location=7b4ec1980089,34213,1733763377986; forceNewPlan=false, retain=false 2024-12-09T16:56:25,421 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=c9cfeb687a0dd64ffa1b3e9c8c9d924c, ASSIGN; state=OFFLINE, location=7b4ec1980089,36153,1733763378299; forceNewPlan=false, retain=false 2024-12-09T16:56:25,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-12-09T16:56:25,568 INFO [7b4ec1980089:36647 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-09T16:56:25,569 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=434d27bd1b99346c64f656cd110c7695, regionState=OPENING, regionLocation=7b4ec1980089,34213,1733763377986 2024-12-09T16:56:25,569 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=18 updating hbase:meta row=c9cfeb687a0dd64ffa1b3e9c8c9d924c, regionState=OPENING, regionLocation=7b4ec1980089,36153,1733763378299 2024-12-09T16:56:25,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=c9cfeb687a0dd64ffa1b3e9c8c9d924c, ASSIGN because future has completed 2024-12-09T16:56:25,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=18, state=RUNNABLE, hasLock=false; OpenRegionProcedure c9cfeb687a0dd64ffa1b3e9c8c9d924c, server=7b4ec1980089,36153,1733763378299}] 2024-12-09T16:56:25,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=434d27bd1b99346c64f656cd110c7695, ASSIGN because future has completed 2024-12-09T16:56:25,624 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=21, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 434d27bd1b99346c64f656cd110c7695, server=7b4ec1980089,34213,1733763377986}] 2024-12-09T16:56:25,783 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. 
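These tables are the fixture for quotas.TestClusterScopeQuotaThrottle#testUserTableClusterScopeQuota, whose quota cache dump later in this log shows a user quota for jenkins scoped to TestNs:TestTable. The sketch below shows how such a user-on-table, cluster-scope throttle is generally installed through the Admin quota API; the Connection variable, the 10-requests-per-minute limit, the helper class, and the QuotaScope-aware throttleUser overload assumed here are illustrative and not values read from this log.

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.quotas.QuotaScope;
    import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
    import org.apache.hadoop.hbase.quotas.ThrottleType;

    public final class UserTableClusterQuotaSketch {
      static void throttleUserOnTable(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          // Hypothetical limit: 10 requests per minute for user 'jenkins' on TestNs:TestTable,
          // enforced cluster-wide (QuotaScope.CLUSTER) rather than per region server.
          admin.setQuota(QuotaSettingsFactory.throttleUser(
              "jenkins",
              TableName.valueOf("TestNs:TestTable"),
              ThrottleType.REQUEST_NUMBER,
              10, TimeUnit.MINUTES,
              QuotaScope.CLUSTER));
        }
      }
    }

A periodic chore on each region server then refreshes its QuotaCache from hbase:quota, which is the state the ThrottleQuotaTestUtil entries further below print as QuotaState/UserQuotaState.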
2024-12-09T16:56:25,783 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => c9cfeb687a0dd64ffa1b3e9c8c9d924c, NAME => 'TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c.', STARTKEY => '', ENDKEY => '1'} 2024-12-09T16:56:25,784 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestTable c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:25,784 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:25,784 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:25,784 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:25,786 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(132): Open TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. 2024-12-09T16:56:25,787 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7752): Opening region: {ENCODED => 434d27bd1b99346c64f656cd110c7695, NAME => 'TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695.', STARTKEY => '1', ENDKEY => ''} 2024-12-09T16:56:25,788 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestTable 434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:25,788 INFO [StoreOpener-c9cfeb687a0dd64ffa1b3e9c8c9d924c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:25,788 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(898): Instantiated TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:56:25,788 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7794): checking encryption for 434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:25,788 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7797): checking classloading for 434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:25,790 INFO [StoreOpener-c9cfeb687a0dd64ffa1b3e9c8c9d924c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c9cfeb687a0dd64ffa1b3e9c8c9d924c columnFamilyName cf 2024-12-09T16:56:25,790 DEBUG [StoreOpener-c9cfeb687a0dd64ffa1b3e9c8c9d924c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:25,791 INFO [StoreOpener-c9cfeb687a0dd64ffa1b3e9c8c9d924c-1 {}] regionserver.HStore(327): Store=c9cfeb687a0dd64ffa1b3e9c8c9d924c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:56:25,792 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:25,793 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:25,794 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:25,796 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:25,796 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:25,797 INFO [StoreOpener-434d27bd1b99346c64f656cd110c7695-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:25,800 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:25,800 INFO [StoreOpener-434d27bd1b99346c64f656cd110c7695-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
434d27bd1b99346c64f656cd110c7695 columnFamilyName cf 2024-12-09T16:56:25,800 DEBUG [StoreOpener-434d27bd1b99346c64f656cd110c7695-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:56:25,802 INFO [StoreOpener-434d27bd1b99346c64f656cd110c7695-1 {}] regionserver.HStore(327): Store=434d27bd1b99346c64f656cd110c7695/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:56:25,803 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1038): replaying wal for 434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:25,804 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/c9cfeb687a0dd64ffa1b3e9c8c9d924c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:56:25,805 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened c9cfeb687a0dd64ffa1b3e9c8c9d924c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59844674, jitterRate=-0.1082448661327362}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T16:56:25,805 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:25,806 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:25,807 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for c9cfeb687a0dd64ffa1b3e9c8c9d924c: Running coprocessor pre-open hook at 1733763385784Writing region info on filesystem at 1733763385784Initializing all the Stores at 1733763385787 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733763385787Cleaning up temporary data from old regions at 1733763385796 (+9 ms)Running coprocessor post-open hooks at 1733763385805 (+9 ms)Region opened successfully at 1733763385806 (+1 ms) 2024-12-09T16:56:25,809 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:25,810 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c., pid=20, 
masterSystemTime=1733763385777 2024-12-09T16:56:25,810 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1048): stopping wal replay for 434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:25,810 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1060): Cleaning up temporary data for 434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:25,814 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1093): writing seq id for 434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:25,814 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. 2024-12-09T16:56:25,814 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. 2024-12-09T16:56:25,816 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=18 updating hbase:meta row=c9cfeb687a0dd64ffa1b3e9c8c9d924c, regionState=OPEN, openSeqNum=2, regionLocation=7b4ec1980089,36153,1733763378299 2024-12-09T16:56:25,820 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/434d27bd1b99346c64f656cd110c7695/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:56:25,822 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1114): Opened 434d27bd1b99346c64f656cd110c7695; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67270366, jitterRate=0.002406567335128784}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T16:56:25,822 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:25,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=18, state=RUNNABLE, hasLock=false; OpenRegionProcedure c9cfeb687a0dd64ffa1b3e9c8c9d924c, server=7b4ec1980089,36153,1733763378299 because future has completed 2024-12-09T16:56:25,822 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1006): Region open journal for 434d27bd1b99346c64f656cd110c7695: Running coprocessor pre-open hook at 1733763385788Writing region info on filesystem at 1733763385788Initializing all the Stores at 1733763385797 (+9 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733763385797Cleaning up temporary data from old regions at 1733763385810 (+13 ms)Running coprocessor post-open hooks at 1733763385822 (+12 ms)Region opened successfully at 1733763385822 2024-12-09T16:56:25,826 INFO 
[RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2236): Post open deploy tasks for TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., pid=21, masterSystemTime=1733763385780 2024-12-09T16:56:25,831 DEBUG [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2266): Finished post open deploy task for TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. 2024-12-09T16:56:25,831 INFO [RS_OPEN_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(153): Opened TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. 2024-12-09T16:56:25,831 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=18 2024-12-09T16:56:25,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=18, state=SUCCESS, hasLock=false; OpenRegionProcedure c9cfeb687a0dd64ffa1b3e9c8c9d924c, server=7b4ec1980089,36153,1733763378299 in 245 msec 2024-12-09T16:56:25,832 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=434d27bd1b99346c64f656cd110c7695, regionState=OPEN, openSeqNum=2, regionLocation=7b4ec1980089,34213,1733763377986 2024-12-09T16:56:25,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 434d27bd1b99346c64f656cd110c7695, server=7b4ec1980089,34213,1733763377986 because future has completed 2024-12-09T16:56:25,838 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, ppid=17, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=c9cfeb687a0dd64ffa1b3e9c8c9d924c, ASSIGN in 422 msec 2024-12-09T16:56:25,845 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=19 2024-12-09T16:56:25,845 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 434d27bd1b99346c64f656cd110c7695, server=7b4ec1980089,34213,1733763377986 in 216 msec 2024-12-09T16:56:25,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=17 2024-12-09T16:56:25,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=17, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=434d27bd1b99346c64f656cd110c7695, ASSIGN in 435 msec 2024-12-09T16:56:25,854 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T16:56:25,854 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763385854"}]},"ts":"1733763385854"} 2024-12-09T16:56:25,867 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=ENABLED in hbase:meta 2024-12-09T16:56:25,870 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T16:56:25,874 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=17, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestNs:TestTable in 635 msec 2024-12-09T16:56:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-12-09T16:56:26,015 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: TestNs:TestTable completed 2024-12-09T16:56:26,015 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestNs:TestTable get assigned. Timeout = 60000ms 2024-12-09T16:56:26,015 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:26,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestNs:TestTable assigned to meta. Checking AM states. 2024-12-09T16:56:26,026 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:26,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestNs:TestTable assigned. 2024-12-09T16:56:26,027 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:26,034 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestNs:TestTable,, stopping at row=TestNs:TestTable ,, for max=2147483647 with caching=100 2024-12-09T16:56:26,051 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestNs:TestTable,, stopping at row=TestNs:TestTable ,, for max=2147483647 with caching=100 2024-12-09T16:56:26,111 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserTableClusterScopeQuota Thread=301, OpenFileDescriptor=535, MaxFileDescriptor=1048576, SystemLoadAverage=461, ProcessCount=11, AvailableMemoryMB=10182 2024-12-09T16:56:26,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='u.jenkins', locateType=CURRENT is [region=hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55., hostname=7b4ec1980089,34213,1733763377986, seqNum=2] 2024-12-09T16:56:26,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:56:26,149 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49869, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T16:56:26,168 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T16:56:26,168 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] client.AsyncConnectionImpl(321): The fetched master address is 7b4ec1980089,36647,1733763376785 2024-12-09T16:56:26,168 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@52a918e5 2024-12-09T16:56:26,180 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T16:56:26,183 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:42019, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=MasterService 2024-12-09T16:56:26,212 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:56:26,212 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b4ec1980089,36153,1733763378299, seqNum=-1] 2024-12-09T16:56:26,217 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:56:26,220 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36145, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-09T16:56:26,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.default', locateType=CURRENT is [region=hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55., hostname=7b4ec1980089,34213,1733763377986, seqNum=2] 2024-12-09T16:56:26,230 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.TestNs', locateType=CURRENT is [region=hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55., hostname=7b4ec1980089,34213,1733763377986, seqNum=2] 2024-12-09T16:56:26,231 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:56:26,233 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51175, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-09T16:56:26,417 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:26,418 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-09T16:56:26,418 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733766986165 bypass), TestNs=QuotaState(ts=1733766986165 bypass)} 2024-12-09T16:56:26,418 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733766986165 bypass), TestNs:TestTable=QuotaState(ts=1733766986165 bypass), TestQuotaAdmin2=QuotaState(ts=1733766986165 bypass)} 2024-12-09T16:56:26,419 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733766986165 [ TestNs:TestTable ])} 2024-12-09T16:56:26,419 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733766986165 bypass)} 2024-12-09T16:56:26,419 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T16:56:26,419 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] client.AsyncConnectionImpl(321): The fetched master address is 7b4ec1980089,36647,1733763376785 2024-12-09T16:56:26,419 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@b2e7267 2024-12-09T16:56:26,420 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=MasterService, sasl=false 2024-12-09T16:56:26,421 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38325, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=MasterService 2024-12-09T16:56:26,425 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:56:26,425 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7b4ec1980089,36153,1733763378299, seqNum=-1] 2024-12-09T16:56:26,425 DEBUG [regionserver/7b4ec1980089:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:56:26,428 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48419, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-09T16:56:26,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.TestNs', locateType=CURRENT is [region=hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55., hostname=7b4ec1980089,34213,1733763377986, seqNum=2] 2024-12-09T16:56:26,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:56:26,434 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46177, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-09T16:56:26,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.default', locateType=CURRENT is [region=hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55., hostname=7b4ec1980089,34213,1733763377986, seqNum=2] 2024-12-09T16:56:26,669 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:26,669 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-09T16:56:26,669 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733766986165 bypass), TestNs=QuotaState(ts=1733766986165 bypass)} 2024-12-09T16:56:26,670 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733766986165 bypass), TestQuotaAdmin1=QuotaState(ts=1733766986165 bypass)} 2024-12-09T16:56:26,670 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733766986165 [ TestNs:TestTable ])} 2024-12-09T16:56:26,670 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733766986165 bypass)} 2024-12-09T16:56:26,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-12-09T16:56:26,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Get size: 118 connection: 172.17.0.2:38640 deadline: 1733763396705, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of 
read requests exceeded - wait 6mins, 0ms 2024-12-09T16:56:26,732 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 , the old value is region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:26,733 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:26,733 DEBUG 
[RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 because the exception is null or not the one we care about 2024-12-09T16:56:26,733 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] 
at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:26,736 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=10 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-09T16:56:26.733Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:224) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:26,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-12-09T16:56:26,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Get size: 117 connection: 172.17.0.2:38640 deadline: 1733763396738, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-12-09T16:56:26,740 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 , the old value is region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:26,740 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:26,741 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 because the exception is null or not the one we care about 2024-12-09T16:56:26,741 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:26,742 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=0 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-09T16:56:26.741Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:224) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-12-09T16:56:26,910 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-12-09T16:56:26,966 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin0'
2024-12-09T16:56:26,969 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestNs:TestTable'
2024-12-09T16:56:26,970 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin2'
2024-12-09T16:56:26,970 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:quota'
2024-12-09T16:56:26,972 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-09T16:56:26,973 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin1'
2024-12-09T16:56:27,005 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-09T16:56:27,005 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-12-09T16:56:27,005 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733770586165 bypass), TestNs=QuotaState(ts=1733770586165 bypass)}
2024-12-09T16:56:27,005 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733770586165 bypass), TestNs:TestTable=QuotaState(ts=1733770586165 bypass), TestQuotaAdmin2=QuotaState(ts=1733770586165 bypass)}
2024-12-09T16:56:27,005 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733770586165 bypass)}
2024-12-09T16:56:27,005 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733770586165 bypass)}
2024-12-09T16:56:27,256 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-09T16:56:27,256 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-12-09T16:56:27,256 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733770586165 bypass), TestNs=QuotaState(ts=1733770586165 bypass)}
2024-12-09T16:56:27,256 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733770586165 bypass), TestQuotaAdmin1=QuotaState(ts=1733770586165 bypass)}
2024-12-09T16:56:27,256 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733770586165 bypass)}
2024-12-09T16:56:27,256 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733770586165 bypass)}
2024-12-09T16:56:27,269 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserTableClusterScopeQuota Thread=301 (was 301), OpenFileDescriptor=549 (was 535) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=461 (was 461), ProcessCount=11 (was 11), AvailableMemoryMB=10169 (was 10182)
2024-12-09T16:56:27,278 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserNamespaceClusterScopeQuota Thread=301, OpenFileDescriptor=549, MaxFileDescriptor=1048576, SystemLoadAverage=461, ProcessCount=11, AvailableMemoryMB=10168
2024-12-09T16:56:27,342 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-09T16:56:27,342 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-09T16:56:27,344 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin0
2024-12-09T16:56:27,344 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin0 Metrics about Tables on a single HBase RegionServer
2024-12-09T16:56:27,345 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin2
2024-12-09T16:56:27,345 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin2 Metrics about Tables on a single HBase RegionServer
2024-12-09T16:56:27,346 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_quota
2024-12-09T16:56:27,346 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_quota Metrics about Tables on a single HBase RegionServer
2024-12-09T16:56:27,349 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T16:56:27,349 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-09T16:56:27,350 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_TestNs_table_TestTable
2024-12-09T16:56:27,350 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_TestNs_table_TestTable Metrics about Tables on a single HBase RegionServer
2024-12-09T16:56:27,351 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.quotas.MasterQuotasObserver
2024-12-09T16:56:27,351 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.quotas.MasterQuotasObserver Metrics about HBase MasterObservers
2024-12-09T16:56:27,352 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:56:27,352 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-09T16:56:27,352 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin1
2024-12-09T16:56:27,352 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin1 Metrics about Tables on a single HBase RegionServer
2024-12-09T16:56:27,543 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-09T16:56:27,543 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true
2024-12-09T16:56:27,794 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-12-09T16:56:27,794 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733774186265 bypass), TestNs=QuotaState(ts=1733774186265 bypass)}
2024-12-09T16:56:27,794 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733774186265 bypass), TestNs:TestTable=QuotaState(ts=1733774186265 bypass), TestQuotaAdmin2=QuotaState(ts=1733774186265 bypass)}
2024-12-09T16:56:27,794 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733774186265 [ default ])}
2024-12-09T16:56:27,794 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733774186265 bypass)}
2024-12-09T16:56:28,045 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-09T16:56:28,045 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true
2024-12-09T16:56:28,296 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-12-09T16:56:28,296 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733774186365 bypass), TestNs=QuotaState(ts=1733774186365 bypass)}
2024-12-09T16:56:28,296 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733774186365 bypass), TestQuotaAdmin1=QuotaState(ts=1733774186365 bypass)}
2024-12-09T16:56:28,296 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733774186365 [ default ])}
2024-12-09T16:56:28,296 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733774186365 bypass)}
2024-12-09T16:56:28,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 12sec, 0ms
2024-12-09T16:56:28,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:38640 deadline: 1733763398306, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms
2024-12-09T16:56:28,308 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:56:28,308 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:56:28,308 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 because the exception is null or not the one we care about
2024-12-09T16:56:28,308 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 12000000000ns which would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:28,309 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=5 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-09T16:56:28.308Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:199) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-12-09T16:56:28,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 10sec, 0ms
2024-12-09T16:56:28,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:38640 deadline: 1733763398332, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms
2024-12-09T16:56:28,335 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:56:28,335 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:56:28,335 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 because the exception is null or not the one we care about
2024-12-09T16:56:28,335 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead.
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
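[editorial sketch] The backoff.HBaseServerExceptionPauseManager DEBUG line above ("suggested pause of 10000000000ns which would exceed the timeout. We should throw instead.") boils down to a single comparison: if the wait suggested by the RpcThrottlingException no longer fits into the operation's remaining time budget, the client gives up immediately instead of sleeping. Below is a minimal, self-contained sketch of that check; the names ThrottlePauseDecision and pauseOrGiveUp are hypothetical and this is not the HBase client implementation, only the shape of the decision.

```java
// Toy model of a "pause or throw" decision for a throttled RPC.
// All names here are hypothetical; this is not the HBase client API.
import java.util.OptionalLong;
import java.util.concurrent.TimeUnit;

public final class ThrottlePauseDecision {

    /**
     * Returns the pause to apply before retrying, or an empty OptionalLong when
     * the suggested pause would not fit into the time the operation has left,
     * in which case the caller should fail the call right away.
     */
    public static OptionalLong pauseOrGiveUp(long suggestedPauseNs, long remainingTimeNs) {
        if (suggestedPauseNs <= 0) {
            return OptionalLong.of(0L);           // nothing to wait for, retry immediately
        }
        if (suggestedPauseNs > remainingTimeNs) {
            return OptionalLong.empty();          // "would exceed the timeout. We should throw instead."
        }
        return OptionalLong.of(suggestedPauseNs); // safe to sleep, then retry
    }

    public static void main(String[] args) {
        long suggested = TimeUnit.SECONDS.toNanos(10); // wait 10sec, as in the log
        long remaining = TimeUnit.SECONDS.toNanos(2);  // but only ~2s of operation timeout left
        System.out.println(pauseOrGiveUp(suggested, remaining).isEmpty()
            ? "fail fast instead of sleeping"
            : "sleep, then retry");
    }
}
```

Failing fast at this point is consistent with the ERROR below reporting "Failed after attempts=1" rather than the caller blocking for the full 10-second wait.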
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:28,336 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-09T16:56:28.335Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:200) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:28,595 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:28,595 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-09T16:56:28,595 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733777786365 bypass), TestNs=QuotaState(ts=1733777786365 bypass)} 2024-12-09T16:56:28,595 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733777786365 bypass), TestNs:TestTable=QuotaState(ts=1733777786365 bypass), TestQuotaAdmin2=QuotaState(ts=1733777786365 bypass)} 2024-12-09T16:56:28,595 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733777786365 bypass)} 2024-12-09T16:56:28,595 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733777786365 bypass)} 2024-12-09T16:56:28,846 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:28,846 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-09T16:56:28,846 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733777786365 bypass), TestNs=QuotaState(ts=1733777786365 bypass)} 2024-12-09T16:56:28,846 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733777786365 bypass), TestQuotaAdmin1=QuotaState(ts=1733777786365 bypass)} 2024-12-09T16:56:28,846 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733777786365 bypass)} 2024-12-09T16:56:28,846 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733777786365 bypass)} 2024-12-09T16:56:28,856 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserNamespaceClusterScopeQuota Thread=300 (was 301), 
OpenFileDescriptor=547 (was 549), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=461 (was 461), ProcessCount=11 (was 11), AvailableMemoryMB=10163 (was 10168) 2024-12-09T16:56:28,878 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserClusterScopeQuota Thread=300, OpenFileDescriptor=547, MaxFileDescriptor=1048576, SystemLoadAverage=461, ProcessCount=11, AvailableMemoryMB=10162 2024-12-09T16:56:29,141 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:29,142 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-09T16:56:29,392 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-09T16:56:29,392 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733781386465 bypass), TestNs=QuotaState(ts=1733781386465 bypass)} 2024-12-09T16:56:29,392 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733781386465 bypass), TestNs:TestTable=QuotaState(ts=1733781386465 bypass), TestQuotaAdmin2=QuotaState(ts=1733781386465 bypass)} 2024-12-09T16:56:29,392 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733781386465 global-limiter)} 2024-12-09T16:56:29,392 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733781386465 bypass)} 2024-12-09T16:56:29,643 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:29,643 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-09T16:56:29,894 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-09T16:56:29,894 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733781386565 bypass), TestNs=QuotaState(ts=1733781386565 bypass)} 2024-12-09T16:56:29,894 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733781386565 bypass), TestQuotaAdmin1=QuotaState(ts=1733781386565 bypass)} 2024-12-09T16:56:29,894 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733781386565 global-limiter)} 2024-12-09T16:56:29,894 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733781386565 bypass)} 2024-12-09T16:56:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 10sec, 0ms 2024-12-09T16:56:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:38640 deadline: 1733763399914, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms 2024-12-09T16:56:29,916 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 , the old value is 
region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:29,916 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:29,916 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 because the exception is null or not the one we care about 2024-12-09T16:56:29,916 DEBUG 
[RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
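[editorial sketch] The ERROR entries from quotas.ThrottleQuotaTestUtil(86) in this log ("put failed after nRetries=6") come from a helper that keeps issuing puts until the write quota throttles it. A toy version of such a loop is sketched below purely for orientation; BoundedPutRetries, PutAttempt and ThrottledException are made-up names, and the real helper's counting and retry semantics may differ.

```java
// Toy bounded put loop that stops on the first throttling failure.
// All names are hypothetical; this is not ThrottleQuotaTestUtil.
public final class BoundedPutRetries {

    /** Stand-in for an RPC-throttling failure raised by the server. */
    static final class ThrottledException extends RuntimeException {
        ThrottledException(String msg) { super(msg); }
    }

    /** Functional hook so the sketch stays self-contained and testable. */
    interface PutAttempt { void run(int attempt); }

    /** Issues puts until one is throttled or the budget is spent; returns the successes. */
    static int doPuts(int maxAttempts, PutAttempt put) {
        int successes = 0;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                put.run(attempt);
                successes++;
            } catch (ThrottledException e) {
                System.out.println("put failed after nRetries=" + attempt + ": " + e.getMessage());
                break; // the quota will not recover inside this loop, so stop here
            }
        }
        return successes;
    }

    public static void main(String[] args) {
        // Pretend the write quota admits 5 requests before throttling kicks in.
        int ok = doPuts(6, attempt -> {
            if (attempt > 5) {
                throw new ThrottledException("number of write requests exceeded - wait 10sec, 0ms");
            }
        });
        System.out.println("successful puts: " + ok);
    }
}
```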
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:29,917 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-09T16:56:29.916Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserClusterScopeQuota(TestClusterScopeQuotaThrottle.java:178) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
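[editorial sketch] Every throttling message in this log has the same shape: "number of write requests exceeded - wait 10sec, 0ms", i.e. the limiter reports how much of the current quota window remains to be waited out. The self-contained toy below shows how a fixed-window limiter can produce a message of exactly that form; WindowLimiter and checkWrite are hypothetical names, not the TimeBasedLimiter code referenced in the stack traces.

```java
// Toy fixed-window request limiter that reports the remaining wait on rejection.
// All names are hypothetical; this is not the HBase quotas implementation.
public final class WindowLimiter {

    private final long limitPerWindow;
    private final long windowMillis;
    private long windowStartMillis;
    private long usedInWindow;

    WindowLimiter(long limitPerWindow, long windowMillis, long nowMillis) {
        this.limitPerWindow = limitPerWindow;
        this.windowMillis = windowMillis;
        this.windowStartMillis = nowMillis;
    }

    /** Throws when the current window is exhausted, reporting how long to wait. */
    void checkWrite(long nowMillis) {
        if (nowMillis - windowStartMillis >= windowMillis) {
            windowStartMillis = nowMillis; // a new window started: quota is fully available again
            usedInWindow = 0;
        }
        if (usedInWindow >= limitPerWindow) {
            long waitMillis = windowMillis - (nowMillis - windowStartMillis);
            throw new IllegalStateException(String.format(
                "number of write requests exceeded - wait %dsec, %dms",
                waitMillis / 1000, waitMillis % 1000));
        }
        usedInWindow++;
    }

    public static void main(String[] args) {
        WindowLimiter limiter = new WindowLimiter(5, 10_000, 0);
        for (int i = 0; i < 6; i++) {
            try {
                limiter.checkWrite(0);              // six writes in the same instant
            } catch (IllegalStateException e) {
                System.out.println(e.getMessage()); // ... - wait 10sec, 0ms
            }
        }
    }
}
```

With all requests landing at the start of a window, the remaining wait equals the full window, which would explain the round figures such as "wait 10sec, 0ms" and "wait 20sec, 0ms" seen throughout this log.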
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:29,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 20sec, 0ms 2024-12-09T16:56:29,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:38640 deadline: 1733763399925, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms 2024-12-09T16:56:29,927 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:29,928 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:29,928 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 because the exception is null or not the one we care about 2024-12-09T16:56:29,928 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 20000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-12-09T16:56:29,929 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=3
org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-09T16:56:29.928Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?]
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserClusterScopeQuota(TestClusterScopeQuotaThrottle.java:179) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-12-09T16:56:30,186 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-09T16:56:30,187 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-12-09T16:56:30,187 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733784986565 bypass), TestNs=QuotaState(ts=1733784986565 bypass)}
2024-12-09T16:56:30,187 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733784986565 bypass), TestNs:TestTable=QuotaState(ts=1733784986565 bypass), TestQuotaAdmin2=QuotaState(ts=1733784986565 bypass)}
2024-12-09T16:56:30,187 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733784986565 bypass)}
2024-12-09T16:56:30,187 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733784986565 bypass)}
2024-12-09T16:56:30,437 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-09T16:56:30,438 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-12-09T16:56:30,438 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733784986565 bypass), TestNs=QuotaState(ts=1733784986565 bypass)}
2024-12-09T16:56:30,438 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733784986565 bypass), TestQuotaAdmin1=QuotaState(ts=1733784986565 bypass)}
2024-12-09T16:56:30,438 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733784986565 bypass)}
2024-12-09T16:56:30,438 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733784986565 bypass)}
2024-12-09T16:56:30,447 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserClusterScopeQuota Thread=299 (was 300), OpenFileDescriptor=546 (was 547), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=464 (was 461) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=10151 (was 10162)
2024-12-09T16:56:30,457 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testTableClusterScopeQuota Thread=299, OpenFileDescriptor=546, MaxFileDescriptor=1048576, SystemLoadAverage=464, ProcessCount=11, AvailableMemoryMB=10150
2024-12-09T16:56:30,714 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-09T16:56:30,714 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-12-09T16:56:30,714 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733788586565 bypass), TestNs=QuotaState(ts=1733788586565 bypass)}
2024-12-09T16:56:30,717 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733788586565 bypass), TestNs:TestTable=QuotaState(ts=1733788586565 TimeBasedLimiter( readReqs=AverageIntervalRateLimiter(avail=10 limit=10 tunit=3600000))), TestQuotaAdmin2=QuotaState(ts=1733788586565 bypass)}
2024-12-09T16:56:30,717 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {}
2024-12-09T16:56:30,717 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733788586565 bypass)}
2024-12-09T16:56:30,968 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-09T16:56:30,968 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-12-09T16:56:30,968 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733788586565 bypass), TestNs=QuotaState(ts=1733788586565 bypass)}
2024-12-09T16:56:30,968 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733788586565 TimeBasedLimiter( readReqs=AverageIntervalRateLimiter(avail=10 limit=10 tunit=3600000))), TestQuotaAdmin1=QuotaState(ts=1733788586565 bypass)}
2024-12-09T16:56:30,968 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {}
2024-12-09T16:56:30,968 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733788586565 bypass)}
2024-12-09T16:56:30,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms
2024-12-09T16:56:30,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Get size: 118 connection: 172.17.0.2:38640 deadline: 1733763400982, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms
2024-12-09T16:56:30,984 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 , the old value is region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:56:30,984 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:56:30,984 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 because the exception is null or not the one we care about
2024-12-09T16:56:30,984 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead.
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-12-09T16:56:30,985 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=10
org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-09T16:56:30.984Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?]
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:151) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:30,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-12-09T16:56:30,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Get size: 117 connection: 172.17.0.2:38640 deadline: 1733763400986, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-12-09T16:56:30,989 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 , the old value is region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:30,989 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:30,989 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 because the exception is null or not the one we care about 2024-12-09T16:56:30,989 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
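The HBaseServerExceptionPauseManager entry above shows the client comparing the throttle's suggested wait (6 minutes, i.e. 360000000000 ns) against the remaining operation time and deciding to fail fast instead of sleeping. The following is only a minimal sketch of that decision rule, not the actual HBaseServerExceptionPauseManager source; the names suggestedPauseNs and remainingTimeNs are illustrative.

    import java.util.OptionalLong;
    import java.util.concurrent.TimeUnit;

    // Hypothetical sketch of the pause-vs-timeout decision logged above.
    final class ThrottlePauseDecision {

      // Returns the pause to apply before retrying, or empty when the pause
      // suggested by the RpcThrottlingException would overrun what is left of
      // the operation timeout, in which case the caller should rethrow.
      static OptionalLong pauseNsBeforeRetry(long suggestedPauseNs, long remainingTimeNs) {
        if (suggestedPauseNs > remainingTimeNs) {
          return OptionalLong.empty(); // "We should throw instead."
        }
        return OptionalLong.of(suggestedPauseNs);
      }

      public static void main(String[] args) {
        long suggested = TimeUnit.MINUTES.toNanos(6);  // "wait 6mins, 0ms" from the log
        long remaining = TimeUnit.SECONDS.toNanos(10); // e.g. 10s of operation timeout left
        // Prints OptionalLong.empty, i.e. throw rather than schedule a retry.
        System.out.println(pauseNsBeforeRetry(suggested, remaining));
      }
    }

With the suggested pause far larger than any sensible operation timeout, the retrying caller surfaces the RpcThrottlingException immediately, which is why the test utility records "get failed" after a single attempt.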
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:30,990 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=0 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-09T16:56:30.989Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:151) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:31,249 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:31,249 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-09T16:56:31,249 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733792186565 bypass), TestNs=QuotaState(ts=1733792186565 bypass)} 2024-12-09T16:56:31,249 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733792186565 bypass), TestNs:TestTable=QuotaState(ts=1733792186565 bypass), TestQuotaAdmin2=QuotaState(ts=1733792186565 bypass)} 2024-12-09T16:56:31,249 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733792186565 bypass)} 2024-12-09T16:56:31,249 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733792186565 bypass)} 2024-12-09T16:56:31,500 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:31,500 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-09T16:56:31,500 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733792186565 bypass), TestNs=QuotaState(ts=1733792186565 bypass)} 2024-12-09T16:56:31,501 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733792186565 bypass), TestQuotaAdmin1=QuotaState(ts=1733792186565 bypass)} 2024-12-09T16:56:31,501 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-12-09T16:56:31,501 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733792186565 bypass)} 2024-12-09T16:56:31,517 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testTableClusterScopeQuota Thread=299 (was 299), OpenFileDescriptor=545 (was 546), MaxFileDescriptor=1048576 
(was 1048576), SystemLoadAverage=464 (was 464), ProcessCount=11 (was 11), AvailableMemoryMB=10149 (was 10150) 2024-12-09T16:56:31,525 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testNamespaceClusterScopeQuota Thread=299, OpenFileDescriptor=545, MaxFileDescriptor=1048576, SystemLoadAverage=464, ProcessCount=11, AvailableMemoryMB=10148 2024-12-09T16:56:31,788 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:31,788 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-09T16:56:31,788 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733795786565 TimeBasedLimiter( writeReqs=AverageIntervalRateLimiter(avail=5 limit=5 tunit=60000) readReqs=AverageIntervalRateLimiter(avail=6 limit=6 tunit=60000))), TestNs=QuotaState(ts=1733795786565 bypass)} 2024-12-09T16:56:31,788 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733795786565 bypass), TestNs:TestTable=QuotaState(ts=1733795786565 bypass), TestQuotaAdmin2=QuotaState(ts=1733795786565 bypass)} 2024-12-09T16:56:31,789 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-12-09T16:56:31,789 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733795786565 bypass)} 2024-12-09T16:56:32,039 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:32,039 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-09T16:56:32,039 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733795786565 TimeBasedLimiter( writeReqs=AverageIntervalRateLimiter(avail=5 limit=5 tunit=60000) readReqs=AverageIntervalRateLimiter(avail=6 limit=6 tunit=60000))), TestNs=QuotaState(ts=1733795786565 bypass)} 2024-12-09T16:56:32,040 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733795786565 bypass), TestQuotaAdmin1=QuotaState(ts=1733795786565 bypass)} 2024-12-09T16:56:32,040 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-12-09T16:56:32,040 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733795786565 bypass)} 2024-12-09T16:56:32,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 12sec, 0ms 2024-12-09T16:56:32,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:38640 deadline: 1733763402054, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms 2024-12-09T16:56:32,057 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at 
org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:32,057 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:32,057 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 because the exception is null or not the one we care about 2024-12-09T16:56:32,057 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 12000000000ns which would exceed the timeout. We should throw instead. 
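The QuotaCache dump above shows the limiter driving these write rejections: an AverageIntervalRateLimiter on the default namespace with limit=5 writes and limit=6 reads per 60000 ms, which yields exactly the "wait 12sec" back-off (60s / 5). A quota of that shape is normally installed through the HBase Admin quota API. The sketch below uses the basic machine-scope form of QuotaSettingsFactory; the test itself exercises cluster-scope quotas, and exact overloads can differ by HBase version, so treat the values and calls as an illustration rather than the test's own setup code.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
    import org.apache.hadoop.hbase.quotas.ThrottleType;

    public final class NamespaceThrottleExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Throttle the 'default' namespace to 5 writes and 6 reads per minute,
          // mirroring the limiter state reported in the QuotaCache dump above.
          admin.setQuota(QuotaSettingsFactory.throttleNamespace(
              "default", ThrottleType.WRITE_NUMBER, 5, TimeUnit.MINUTES));
          admin.setQuota(QuotaSettingsFactory.throttleNamespace(
              "default", ThrottleType.READ_NUMBER, 6, TimeUnit.MINUTES));
          // Remove the throttle again once the experiment is finished.
          admin.setQuota(QuotaSettingsFactory.unthrottleNamespace("default"));
        }
      }
    }

Once such a quota is cached on the RegionServer, any Mutate or Get beyond the per-interval allowance is rejected in RegionServerRpcQuotaManager.checkBatchQuota with an RpcThrottlingException carrying the wait hint seen throughout this log.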
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:32,058 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=5 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-09T16:56:32.057Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:128) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:32,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 10sec, 0ms 2024-12-09T16:56:32,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34213 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:38640 deadline: 1733763402070, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms 2024-12-09T16:56:32,073 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:32,073 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:56:32,073 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., hostname=7b4ec1980089,34213,1733763377986, seqNum=-1 because the exception is null or not the one we care about 2024-12-09T16:56:32,073 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead. 
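The HBaseServerExceptionPauseManager DEBUG entry above captures a fail-fast decision: the throttle suggests a 10 sec pause (10000000000 ns), which would exceed the time left in the operation timeout, so the retrying caller throws the RpcThrottlingException immediately rather than sleeping. Below is a minimal Java sketch of that check under stated assumptions; the class and method names are illustrative only, not HBase's actual backoff internals.

import java.time.Duration;

// Illustrative only: mirrors the pause-or-throw decision logged above,
// not the real org.apache.hadoop.hbase.client.backoff implementation.
public final class ThrottlePauseSketch {

  // Returns the pause to apply, or null to signal "throw instead of pausing".
  static Duration pauseOrThrow(Duration suggestedWait, Duration remainingTimeout) {
    // Honoring a wait longer than the remaining timeout can never succeed,
    // so surface the RpcThrottlingException to the caller right away.
    if (suggestedWait.compareTo(remainingTimeout) >= 0) {
      return null;
    }
    return suggestedWait;
  }

  public static void main(String[] args) {
    // The logged case: the server asks for 10s, far more than the time remaining.
    Duration wait = Duration.ofSeconds(10);
    Duration remaining = Duration.ofMillis(500); // hypothetical remaining budget
    System.out.println(pauseOrThrow(wait, remaining) == null
        ? "throw RpcThrottlingException now"
        : "sleep, then retry");
  }
}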
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:32,074 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-09T16:56:32.073Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:129) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-09T16:56:32,334 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:32,334 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-09T16:56:32,334 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733799386565 bypass), TestNs=QuotaState(ts=1733799386565 bypass)} 2024-12-09T16:56:32,334 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733799386565 bypass), TestNs:TestTable=QuotaState(ts=1733799386565 bypass), TestQuotaAdmin2=QuotaState(ts=1733799386565 bypass)} 2024-12-09T16:56:32,335 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733799386565 bypass)} 2024-12-09T16:56:32,335 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733799386565 bypass)} 2024-12-09T16:56:32,585 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:56:32,586 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-09T16:56:32,586 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733799386565 bypass), TestNs=QuotaState(ts=1733799386565 bypass)} 2024-12-09T16:56:32,586 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733799386565 bypass), TestQuotaAdmin1=QuotaState(ts=1733799386565 bypass)} 2024-12-09T16:56:32,586 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-12-09T16:56:32,586 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733799386565 bypass)} 2024-12-09T16:56:32,600 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testNamespaceClusterScopeQuota Thread=299 (was 299), OpenFileDescriptor=545 (was 545), 
MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=464 (was 464), ProcessCount=11 (was 11), AvailableMemoryMB=10145 (was 10148) 2024-12-09T16:56:32,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin0 2024-12-09T16:56:32,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=22, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin0 2024-12-09T16:56:32,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-12-09T16:56:32,612 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763392612"}]},"ts":"1733763392612"} 2024-12-09T16:56:32,615 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=DISABLING in hbase:meta 2024-12-09T16:56:32,615 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin0 to state=DISABLING 2024-12-09T16:56:32,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin0}] 2024-12-09T16:56:32,622 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=aef056b9db773e1b5b352ca79819f223, UNASSIGN}] 2024-12-09T16:56:32,624 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=aef056b9db773e1b5b352ca79819f223, UNASSIGN 2024-12-09T16:56:32,626 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=aef056b9db773e1b5b352ca79819f223, regionState=CLOSING, regionLocation=7b4ec1980089,34213,1733763377986 2024-12-09T16:56:32,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=aef056b9db773e1b5b352ca79819f223, UNASSIGN because future has completed 2024-12-09T16:56:32,629 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T16:56:32,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE, hasLock=false; CloseRegionProcedure aef056b9db773e1b5b352ca79819f223, server=7b4ec1980089,34213,1733763377986}] 2024-12-09T16:56:32,789 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(122): Close aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:32,789 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T16:56:32,789 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1722): Closing 
aef056b9db773e1b5b352ca79819f223, disabling compactions & flushes 2024-12-09T16:56:32,789 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1755): Closing region TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. 2024-12-09T16:56:32,790 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. 2024-12-09T16:56:32,790 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. after waiting 0 ms 2024-12-09T16:56:32,790 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. 2024-12-09T16:56:32,794 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(2902): Flushing aef056b9db773e1b5b352ca79819f223 1/1 column families, dataSize=578 B heapSize=2.11 KB 2024-12-09T16:56:32,839 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/.tmp/cf/300cbc62123e4adb9597204ad1aa5c78 is 38, key is row-0/cf:q/1733763392041/Put/seqid=0 2024-12-09T16:56:32,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741847_1023 (size=4967) 2024-12-09T16:56:32,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741847_1023 (size=4967) 2024-12-09T16:56:32,849 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=578 B at sequenceid=21 (bloomFilter=false), to=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/.tmp/cf/300cbc62123e4adb9597204ad1aa5c78 2024-12-09T16:56:32,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-12-09T16:56:32,893 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/.tmp/cf/300cbc62123e4adb9597204ad1aa5c78 as hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/cf/300cbc62123e4adb9597204ad1aa5c78 2024-12-09T16:56:32,903 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/cf/300cbc62123e4adb9597204ad1aa5c78, entries=6, sequenceid=21, filesize=4.9 K 
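The flush sequence logged above writes the new HFile under the region's .tmp directory (.../.tmp/cf/300cbc62...) and only afterwards commits it into the column family directory, so readers never observe a partially written file. A small, generic Java sketch of that write-to-staging-then-rename pattern follows (plain java.nio, not HBase code; the file names are made up for illustration).

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Generic illustration of the staging pattern seen in the flush log above:
// write the file somewhere invisible to readers, then publish it atomically.
public final class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    Path cfDir = Files.createTempDirectory("cf");          // stand-in for the cf/ directory
    Path staging = cfDir.resolve("flush-output.tmp");      // stand-in for .tmp/cf/<hfile>
    Path committed = cfDir.resolve("flush-output.hfile");  // final, visible location

    Files.write(staging, "flushed cells".getBytes(StandardCharsets.UTF_8));
    // The rename is the commit point: the file appears fully written or not at all.
    Files.move(staging, committed, StandardCopyOption.ATOMIC_MOVE);

    System.out.println("committed " + committed);
  }
}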
2024-12-09T16:56:32,910 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(3140): Finished flush of dataSize ~578 B/578, heapSize ~2.09 KB/2144, currentSize=0 B/0 for aef056b9db773e1b5b352ca79819f223 in 116ms, sequenceid=21, compaction requested=false 2024-12-09T16:56:32,917 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-09T16:56:32,920 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1973): Closed TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. 2024-12-09T16:56:32,920 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1676): Region close journal for aef056b9db773e1b5b352ca79819f223: Waiting for close lock at 1733763392789Running coprocessor pre-close hooks at 1733763392789Disabling compacts and flushes for region at 1733763392789Disabling writes for close at 1733763392790 (+1 ms)Obtaining lock to block concurrent updates at 1733763392794 (+4 ms)Preparing flush snapshotting stores in aef056b9db773e1b5b352ca79819f223 at 1733763392794Finished memstore snapshotting TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223., syncing WAL and waiting on mvcc, flushsize=dataSize=578, getHeapSize=2144, getOffHeapSize=0, getCellsCount=17 at 1733763392800 (+6 ms)Flushing stores of TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223. at 1733763392802 (+2 ms)Flushing aef056b9db773e1b5b352ca79819f223/cf: creating writer at 1733763392804 (+2 ms)Flushing aef056b9db773e1b5b352ca79819f223/cf: appending metadata at 1733763392834 (+30 ms)Flushing aef056b9db773e1b5b352ca79819f223/cf: closing flushed file at 1733763392835 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c883923: reopening flushed file at 1733763392891 (+56 ms)Finished flush of dataSize ~578 B/578, heapSize ~2.09 KB/2144, currentSize=0 B/0 for aef056b9db773e1b5b352ca79819f223 in 116ms, sequenceid=21, compaction requested=false at 1733763392910 (+19 ms)Writing region close event to WAL at 1733763392912 (+2 ms)Running coprocessor post-close hooks at 1733763392918 (+6 ms)Closed at 1733763392920 (+2 ms) 2024-12-09T16:56:32,923 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(157): Closed aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:32,924 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=aef056b9db773e1b5b352ca79819f223, regionState=CLOSED 2024-12-09T16:56:32,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=24, state=RUNNABLE, hasLock=false; CloseRegionProcedure aef056b9db773e1b5b352ca79819f223, server=7b4ec1980089,34213,1733763377986 because future has completed 2024-12-09T16:56:32,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=24 2024-12-09T16:56:32,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=24, state=SUCCESS, hasLock=false; CloseRegionProcedure aef056b9db773e1b5b352ca79819f223, 
server=7b4ec1980089,34213,1733763377986 in 297 msec 2024-12-09T16:56:32,933 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=24, resume processing ppid=23 2024-12-09T16:56:32,933 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=23, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=aef056b9db773e1b5b352ca79819f223, UNASSIGN in 308 msec 2024-12-09T16:56:32,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-09T16:56:32,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin0 in 317 msec 2024-12-09T16:56:32,939 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763392939"}]},"ts":"1733763392939"} 2024-12-09T16:56:32,942 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=DISABLED in hbase:meta 2024-12-09T16:56:32,942 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin0 to state=DISABLED 2024-12-09T16:56:32,945 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin0 in 337 msec 2024-12-09T16:56:33,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-12-09T16:56:33,384 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin0 completed 2024-12-09T16:56:33,387 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin0 2024-12-09T16:56:33,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=26, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin0 2024-12-09T16:56:33,392 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=26, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-12-09T16:56:33,394 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=26, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-12-09T16:56:33,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=26 2024-12-09T16:56:33,400 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:33,405 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/cf, FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/recovered.edits] 2024-12-09T16:56:33,415 DEBUG [HFileArchiver-1 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/cf/300cbc62123e4adb9597204ad1aa5c78 to hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/archive/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/cf/300cbc62123e4adb9597204ad1aa5c78 2024-12-09T16:56:33,420 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/recovered.edits/24.seqid to hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/archive/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223/recovered.edits/24.seqid 2024-12-09T16:56:33,421 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin0/aef056b9db773e1b5b352ca79819f223 2024-12-09T16:56:33,421 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin0 regions 2024-12-09T16:56:33,427 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=26, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-12-09T16:56:33,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36153 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-09T16:56:33,434 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin0 from hbase:meta 2024-12-09T16:56:33,437 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin0' descriptor. 2024-12-09T16:56:33,439 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=26, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-12-09T16:56:33,440 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin0' from region states. 2024-12-09T16:56:33,440 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733763393440"}]},"ts":"9223372036854775807"} 2024-12-09T16:56:33,443 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-09T16:56:33,443 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => aef056b9db773e1b5b352ca79819f223, NAME => 'TestQuotaAdmin0,,1733763382505.aef056b9db773e1b5b352ca79819f223.', STARTKEY => '', ENDKEY => ''}] 2024-12-09T16:56:33,443 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin0' as deleted. 
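At this point the TestQuotaAdmin0 cleanup has largely run its course on the master: DisableTableProcedure pid=22 unassigned and closed the table's single region, and DeleteTableProcedure pid=26 has archived the region's cf and recovered.edits files (shown above) and is about to remove the table from hbase:meta. From the client's perspective the whole cascade is driven by two blocking Admin calls. A minimal sketch, assuming a standard client configuration and reusing the table name from the log (illustrative only, not the test's own code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestQuotaAdmin0");
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);   // drives a DisableTableProcedure like pid=22 above
          }
          admin.deleteTable(table);      // drives a DeleteTableProcedure like pid=26 above
        }
      }
    }

deleteTable refuses to run against an enabled table, which is why the log always shows a DisableTableProcedure completing before the corresponding DeleteTableProcedure starts.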
2024-12-09T16:56:33,443 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733763393443"}]},"ts":"9223372036854775807"} 2024-12-09T16:56:33,446 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin0 state from META 2024-12-09T16:56:33,447 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=26, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-12-09T16:56:33,449 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin0 in 59 msec 2024-12-09T16:56:33,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=26 2024-12-09T16:56:33,655 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin0 2024-12-09T16:56:33,656 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin0 completed 2024-12-09T16:56:33,657 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin1 2024-12-09T16:56:33,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=27, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin1 2024-12-09T16:56:33,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=27 2024-12-09T16:56:33,665 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763393665"}]},"ts":"1733763393665"} 2024-12-09T16:56:33,668 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=DISABLING in hbase:meta 2024-12-09T16:56:33,668 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin1 to state=DISABLING 2024-12-09T16:56:33,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin1}] 2024-12-09T16:56:33,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=90b87e721837483af81762da8fa8eee2, UNASSIGN}] 2024-12-09T16:56:33,672 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=90b87e721837483af81762da8fa8eee2, UNASSIGN 2024-12-09T16:56:33,673 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=29 updating hbase:meta row=90b87e721837483af81762da8fa8eee2, regionState=CLOSING, regionLocation=7b4ec1980089,36153,1733763378299 2024-12-09T16:56:33,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=90b87e721837483af81762da8fa8eee2, UNASSIGN because future has completed 2024-12-09T16:56:33,675 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T16:56:33,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=30, ppid=29, state=RUNNABLE, hasLock=false; CloseRegionProcedure 90b87e721837483af81762da8fa8eee2, server=7b4ec1980089,36153,1733763378299}] 2024-12-09T16:56:33,829 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(122): Close 90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:33,829 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T16:56:33,829 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1722): Closing 90b87e721837483af81762da8fa8eee2, disabling compactions & flushes 2024-12-09T16:56:33,829 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1755): Closing region TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. 2024-12-09T16:56:33,829 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. 2024-12-09T16:56:33,829 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. after waiting 0 ms 2024-12-09T16:56:33,829 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. 2024-12-09T16:56:33,835 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin1/90b87e721837483af81762da8fa8eee2/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-09T16:56:33,836 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1973): Closed TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2. 
2024-12-09T16:56:33,836 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1676): Region close journal for 90b87e721837483af81762da8fa8eee2: Waiting for close lock at 1733763393829Running coprocessor pre-close hooks at 1733763393829Disabling compacts and flushes for region at 1733763393829Disabling writes for close at 1733763393829Writing region close event to WAL at 1733763393830 (+1 ms)Running coprocessor post-close hooks at 1733763393836 (+6 ms)Closed at 1733763393836 2024-12-09T16:56:33,839 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(157): Closed 90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:33,840 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=29 updating hbase:meta row=90b87e721837483af81762da8fa8eee2, regionState=CLOSED 2024-12-09T16:56:33,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=30, ppid=29, state=RUNNABLE, hasLock=false; CloseRegionProcedure 90b87e721837483af81762da8fa8eee2, server=7b4ec1980089,36153,1733763378299 because future has completed 2024-12-09T16:56:33,846 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=29 2024-12-09T16:56:33,846 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=29, state=SUCCESS, hasLock=false; CloseRegionProcedure 90b87e721837483af81762da8fa8eee2, server=7b4ec1980089,36153,1733763378299 in 168 msec 2024-12-09T16:56:33,848 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=28 2024-12-09T16:56:33,848 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=90b87e721837483af81762da8fa8eee2, UNASSIGN in 175 msec 2024-12-09T16:56:33,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=27 2024-12-09T16:56:33,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=27, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin1 in 180 msec 2024-12-09T16:56:33,852 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763393852"}]},"ts":"1733763393852"} 2024-12-09T16:56:33,854 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=DISABLED in hbase:meta 2024-12-09T16:56:33,854 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin1 to state=DISABLED 2024-12-09T16:56:33,857 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin1 in 198 msec 2024-12-09T16:56:33,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=27 2024-12-09T16:56:33,923 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin1 completed 2024-12-09T16:56:33,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin1 2024-12-09T16:56:33,926 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin1 2024-12-09T16:56:33,927 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=31, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-12-09T16:56:33,928 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=31, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-12-09T16:56:33,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-09T16:56:33,932 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin1/90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:33,935 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin1/90b87e721837483af81762da8fa8eee2/cf, FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin1/90b87e721837483af81762da8fa8eee2/recovered.edits] 2024-12-09T16:56:33,943 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin1/90b87e721837483af81762da8fa8eee2/recovered.edits/4.seqid to hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/archive/data/default/TestQuotaAdmin1/90b87e721837483af81762da8fa8eee2/recovered.edits/4.seqid 2024-12-09T16:56:33,944 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin1/90b87e721837483af81762da8fa8eee2 2024-12-09T16:56:33,944 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin1 regions 2024-12-09T16:56:33,947 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=31, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-12-09T16:56:33,950 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin1 from hbase:meta 2024-12-09T16:56:33,953 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin1' descriptor. 2024-12-09T16:56:33,955 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=31, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-12-09T16:56:33,955 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin1' from region states. 
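Note that the HFileArchiver does not delete region data outright: the cf and recovered.edits paths are moved from data/default/TestQuotaAdmin1/... into the parallel archive/data/default/TestQuotaAdmin1/... tree, and only the now-empty region directory is removed. If needed, the archived files can be inspected with the ordinary Hadoop FileSystem API; the sketch below reuses the NameNode address and test-data root from the log purely for illustration (the mini-cluster and its paths are transient, so this is not something the test itself does):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Path layout mirrors the log: <root>/archive/data/<namespace>/<table>/<region>/...
        Path archived = new Path("hdfs://localhost:34305/user/jenkins/test-data/"
            + "bc6d47bd-eecb-0546-f60d-99969a554cfd/archive/data/default/TestQuotaAdmin1");
        try (FileSystem fs = FileSystem.get(archived.toUri(), conf)) {
          for (FileStatus status : fs.listStatus(archived)) {
            System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
          }
        }
      }
    }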
2024-12-09T16:56:33,955 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733763393955"}]},"ts":"9223372036854775807"} 2024-12-09T16:56:33,957 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-09T16:56:33,958 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 90b87e721837483af81762da8fa8eee2, NAME => 'TestQuotaAdmin1,,1733763383356.90b87e721837483af81762da8fa8eee2.', STARTKEY => '', ENDKEY => ''}] 2024-12-09T16:56:33,958 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin1' as deleted. 2024-12-09T16:56:33,958 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733763393958"}]},"ts":"9223372036854775807"} 2024-12-09T16:56:33,960 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin1 state from META 2024-12-09T16:56:33,962 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=31, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-12-09T16:56:33,964 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin1 in 38 msec 2024-12-09T16:56:34,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-09T16:56:34,194 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin1 2024-12-09T16:56:34,194 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin1 completed 2024-12-09T16:56:34,195 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin2 2024-12-09T16:56:34,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin2 2024-12-09T16:56:34,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=32 2024-12-09T16:56:34,203 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763394203"}]},"ts":"1733763394203"} 2024-12-09T16:56:34,205 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=DISABLING in hbase:meta 2024-12-09T16:56:34,205 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin2 to state=DISABLING 2024-12-09T16:56:34,206 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin2}] 2024-12-09T16:56:34,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=TestQuotaAdmin2, region=cd004b0305056e9cbc7135ba0180b1d6, UNASSIGN}] 2024-12-09T16:56:34,210 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=cd004b0305056e9cbc7135ba0180b1d6, UNASSIGN 2024-12-09T16:56:34,211 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=cd004b0305056e9cbc7135ba0180b1d6, regionState=CLOSING, regionLocation=7b4ec1980089,34213,1733763377986 2024-12-09T16:56:34,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=cd004b0305056e9cbc7135ba0180b1d6, UNASSIGN because future has completed 2024-12-09T16:56:34,213 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T16:56:34,213 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure cd004b0305056e9cbc7135ba0180b1d6, server=7b4ec1980089,34213,1733763377986}] 2024-12-09T16:56:34,367 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:34,367 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T16:56:34,368 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing cd004b0305056e9cbc7135ba0180b1d6, disabling compactions & flushes 2024-12-09T16:56:34,368 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. 2024-12-09T16:56:34,368 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. 2024-12-09T16:56:34,368 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. after waiting 0 ms 2024-12-09T16:56:34,368 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. 2024-12-09T16:56:34,378 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin2/cd004b0305056e9cbc7135ba0180b1d6/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-09T16:56:34,380 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6. 
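The same disable-then-delete cycle repeats here for TestQuotaAdmin1 and TestQuotaAdmin2. The RawAsyncHBaseAdmin$TableProcedureBiConsumer lines ("Operation: DISABLE ... completed", "Operation: DELETE ... completed") indicate the client side is going through the asynchronous admin, which completes a future once the master reports the procedure done. A hedged sketch of equivalent AsyncAdmin usage (generic code under standard-configuration assumptions, not taken from the test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncDropTables {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn =
                 ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          AsyncAdmin admin = conn.getAdmin();
          for (String name : new String[] {"TestQuotaAdmin1", "TestQuotaAdmin2"}) {
            TableName table = TableName.valueOf(name);   // table names taken from the log
            admin.disableTable(table).get();             // each call returns a CompletableFuture<Void>
            admin.deleteTable(table).get();
          }
        }
      }
    }

Each call returns a CompletableFuture, so the .get() calls here simply serialize the operations, matching the order in which the log shows them completing.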
2024-12-09T16:56:34,380 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for cd004b0305056e9cbc7135ba0180b1d6: Waiting for close lock at 1733763394368Running coprocessor pre-close hooks at 1733763394368Disabling compacts and flushes for region at 1733763394368Disabling writes for close at 1733763394368Writing region close event to WAL at 1733763394369 (+1 ms)Running coprocessor post-close hooks at 1733763394380 (+11 ms)Closed at 1733763394380 2024-12-09T16:56:34,384 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:34,384 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=cd004b0305056e9cbc7135ba0180b1d6, regionState=CLOSED 2024-12-09T16:56:34,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure cd004b0305056e9cbc7135ba0180b1d6, server=7b4ec1980089,34213,1733763377986 because future has completed 2024-12-09T16:56:34,389 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-12-09T16:56:34,389 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure cd004b0305056e9cbc7135ba0180b1d6, server=7b4ec1980089,34213,1733763377986 in 174 msec 2024-12-09T16:56:34,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=33 2024-12-09T16:56:34,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=cd004b0305056e9cbc7135ba0180b1d6, UNASSIGN in 181 msec 2024-12-09T16:56:34,396 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=33, resume processing ppid=32 2024-12-09T16:56:34,396 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin2 in 187 msec 2024-12-09T16:56:34,398 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763394398"}]},"ts":"1733763394398"} 2024-12-09T16:56:34,400 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=DISABLED in hbase:meta 2024-12-09T16:56:34,400 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin2 to state=DISABLED 2024-12-09T16:56:34,404 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin2 in 207 msec 2024-12-09T16:56:34,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=32 2024-12-09T16:56:34,464 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin2 completed 2024-12-09T16:56:34,464 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin2 2024-12-09T16:56:34,465 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin2 2024-12-09T16:56:34,466 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-12-09T16:56:34,467 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-12-09T16:56:34,470 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin2/cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:34,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-12-09T16:56:34,473 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin2/cd004b0305056e9cbc7135ba0180b1d6/cf, FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin2/cd004b0305056e9cbc7135ba0180b1d6/recovered.edits] 2024-12-09T16:56:34,480 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin2/cd004b0305056e9cbc7135ba0180b1d6/recovered.edits/4.seqid to hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/archive/data/default/TestQuotaAdmin2/cd004b0305056e9cbc7135ba0180b1d6/recovered.edits/4.seqid 2024-12-09T16:56:34,481 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/default/TestQuotaAdmin2/cd004b0305056e9cbc7135ba0180b1d6 2024-12-09T16:56:34,481 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin2 regions 2024-12-09T16:56:34,485 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-12-09T16:56:34,488 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin2 from hbase:meta 2024-12-09T16:56:34,491 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin2' descriptor. 2024-12-09T16:56:34,493 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-12-09T16:56:34,493 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin2' from region states. 
2024-12-09T16:56:34,493 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733763394493"}]},"ts":"9223372036854775807"} 2024-12-09T16:56:34,496 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-09T16:56:34,496 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => cd004b0305056e9cbc7135ba0180b1d6, NAME => 'TestQuotaAdmin2,,1733763384156.cd004b0305056e9cbc7135ba0180b1d6.', STARTKEY => '', ENDKEY => ''}] 2024-12-09T16:56:34,496 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin2' as deleted. 2024-12-09T16:56:34,497 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733763394496"}]},"ts":"9223372036854775807"} 2024-12-09T16:56:34,499 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin2 state from META 2024-12-09T16:56:34,500 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-12-09T16:56:34,502 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin2 in 36 msec 2024-12-09T16:56:34,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-12-09T16:56:34,723 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin2 2024-12-09T16:56:34,724 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin2 completed 2024-12-09T16:56:34,724 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestNs:TestTable 2024-12-09T16:56:34,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestNs:TestTable 2024-12-09T16:56:34,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-09T16:56:34,732 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763394731"}]},"ts":"1733763394731"} 2024-12-09T16:56:34,734 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=DISABLING in hbase:meta 2024-12-09T16:56:34,734 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestNs:TestTable to state=DISABLING 2024-12-09T16:56:34,735 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestNs:TestTable}] 2024-12-09T16:56:34,737 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=TestNs:TestTable, region=c9cfeb687a0dd64ffa1b3e9c8c9d924c, UNASSIGN}, {pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=434d27bd1b99346c64f656cd110c7695, UNASSIGN}] 2024-12-09T16:56:34,739 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=434d27bd1b99346c64f656cd110c7695, UNASSIGN 2024-12-09T16:56:34,739 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=c9cfeb687a0dd64ffa1b3e9c8c9d924c, UNASSIGN 2024-12-09T16:56:34,740 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=434d27bd1b99346c64f656cd110c7695, regionState=CLOSING, regionLocation=7b4ec1980089,34213,1733763377986 2024-12-09T16:56:34,740 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=39 updating hbase:meta row=c9cfeb687a0dd64ffa1b3e9c8c9d924c, regionState=CLOSING, regionLocation=7b4ec1980089,36153,1733763378299 2024-12-09T16:56:34,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=c9cfeb687a0dd64ffa1b3e9c8c9d924c, UNASSIGN because future has completed 2024-12-09T16:56:34,742 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T16:56:34,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=41, ppid=39, state=RUNNABLE, hasLock=false; CloseRegionProcedure c9cfeb687a0dd64ffa1b3e9c8c9d924c, server=7b4ec1980089,36153,1733763378299}] 2024-12-09T16:56:34,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=434d27bd1b99346c64f656cd110c7695, UNASSIGN because future has completed 2024-12-09T16:56:34,744 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-09T16:56:34,744 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 434d27bd1b99346c64f656cd110c7695, server=7b4ec1980089,34213,1733763377986}] 2024-12-09T16:56:34,897 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(122): Close c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:34,897 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T16:56:34,898 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1722): Closing c9cfeb687a0dd64ffa1b3e9c8c9d924c, disabling compactions & flushes 2024-12-09T16:56:34,898 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1755): 
Closing region TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. 2024-12-09T16:56:34,898 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. 2024-12-09T16:56:34,898 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. after waiting 0 ms 2024-12-09T16:56:34,898 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. 2024-12-09T16:56:34,899 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:34,899 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-09T16:56:34,900 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 434d27bd1b99346c64f656cd110c7695, disabling compactions & flushes 2024-12-09T16:56:34,900 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. 2024-12-09T16:56:34,900 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. 2024-12-09T16:56:34,900 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. after waiting 0 ms 2024-12-09T16:56:34,900 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. 2024-12-09T16:56:34,908 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/c9cfeb687a0dd64ffa1b3e9c8c9d924c/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-09T16:56:34,908 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/434d27bd1b99346c64f656cd110c7695/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-09T16:56:34,909 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1973): Closed TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c. 
2024-12-09T16:56:34,909 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1676): Region close journal for c9cfeb687a0dd64ffa1b3e9c8c9d924c: Waiting for close lock at 1733763394898Running coprocessor pre-close hooks at 1733763394898Disabling compacts and flushes for region at 1733763394898Disabling writes for close at 1733763394898Writing region close event to WAL at 1733763394900 (+2 ms)Running coprocessor post-close hooks at 1733763394909 (+9 ms)Closed at 1733763394909 2024-12-09T16:56:34,909 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695. 2024-12-09T16:56:34,909 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 434d27bd1b99346c64f656cd110c7695: Waiting for close lock at 1733763394899Running coprocessor pre-close hooks at 1733763394899Disabling compacts and flushes for region at 1733763394899Disabling writes for close at 1733763394900 (+1 ms)Writing region close event to WAL at 1733763394901 (+1 ms)Running coprocessor post-close hooks at 1733763394909 (+8 ms)Closed at 1733763394909 2024-12-09T16:56:34,911 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:34,911 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=434d27bd1b99346c64f656cd110c7695, regionState=CLOSED 2024-12-09T16:56:34,912 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(157): Closed c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:34,912 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=39 updating hbase:meta row=c9cfeb687a0dd64ffa1b3e9c8c9d924c, regionState=CLOSED 2024-12-09T16:56:34,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 434d27bd1b99346c64f656cd110c7695, server=7b4ec1980089,34213,1733763377986 because future has completed 2024-12-09T16:56:34,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE, hasLock=false; CloseRegionProcedure c9cfeb687a0dd64ffa1b3e9c8c9d924c, server=7b4ec1980089,36153,1733763378299 because future has completed 2024-12-09T16:56:34,917 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=40 2024-12-09T16:56:34,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 434d27bd1b99346c64f656cd110c7695, server=7b4ec1980089,34213,1733763377986 in 171 msec 2024-12-09T16:56:34,919 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=38, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=434d27bd1b99346c64f656cd110c7695, UNASSIGN in 180 msec 2024-12-09T16:56:34,919 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=41, resume processing ppid=39 2024-12-09T16:56:34,919 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, 
hasLock=false; CloseRegionProcedure c9cfeb687a0dd64ffa1b3e9c8c9d924c, server=7b4ec1980089,36153,1733763378299 in 174 msec 2024-12-09T16:56:34,921 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-09T16:56:34,921 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=c9cfeb687a0dd64ffa1b3e9c8c9d924c, UNASSIGN in 182 msec 2024-12-09T16:56:34,923 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=38, resume processing ppid=37 2024-12-09T16:56:34,923 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=37, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestNs:TestTable in 186 msec 2024-12-09T16:56:34,925 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733763394925"}]},"ts":"1733763394925"} 2024-12-09T16:56:34,927 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=DISABLED in hbase:meta 2024-12-09T16:56:34,927 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestNs:TestTable to state=DISABLED 2024-12-09T16:56:34,930 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestNs:TestTable in 203 msec 2024-12-09T16:56:34,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-09T16:56:34,993 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: TestNs:TestTable completed 2024-12-09T16:56:34,994 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestNs:TestTable 2024-12-09T16:56:34,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=43, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestNs:TestTable 2024-12-09T16:56:34,997 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=43, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-12-09T16:56:34,998 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=43, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-12-09T16:56:35,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=43 2024-12-09T16:56:35,004 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:35,005 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:35,008 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/434d27bd1b99346c64f656cd110c7695/cf, FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/434d27bd1b99346c64f656cd110c7695/recovered.edits] 2024-12-09T16:56:35,008 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/c9cfeb687a0dd64ffa1b3e9c8c9d924c/cf, FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/c9cfeb687a0dd64ffa1b3e9c8c9d924c/recovered.edits] 2024-12-09T16:56:35,016 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/434d27bd1b99346c64f656cd110c7695/recovered.edits/4.seqid to hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/archive/data/TestNs/TestTable/434d27bd1b99346c64f656cd110c7695/recovered.edits/4.seqid 2024-12-09T16:56:35,016 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/c9cfeb687a0dd64ffa1b3e9c8c9d924c/recovered.edits/4.seqid to hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/archive/data/TestNs/TestTable/c9cfeb687a0dd64ffa1b3e9c8c9d924c/recovered.edits/4.seqid 2024-12-09T16:56:35,017 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/434d27bd1b99346c64f656cd110c7695 2024-12-09T16:56:35,017 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/TestNs/TestTable/c9cfeb687a0dd64ffa1b3e9c8c9d924c 2024-12-09T16:56:35,017 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestNs:TestTable regions 2024-12-09T16:56:35,020 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=43, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-12-09T16:56:35,023 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of TestNs:TestTable from hbase:meta 2024-12-09T16:56:35,025 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestNs:TestTable' descriptor. 2024-12-09T16:56:35,027 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=43, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-12-09T16:56:35,027 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestNs:TestTable' from region states. 
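TestNs:TestTable is the only namespace-qualified table in the run, and the entries that follow show its meta rows being removed and then the TestNs namespace itself being dropped through DeleteNamespaceProcedure pid=44. A namespace can only be deleted once it no longer contains any tables, which is why the table cleanup above has to finish first. A generic client-side sketch of that ordering (standard Admin methods, illustrative only, not the test's code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropNamespaceExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Drop every table in the namespace first; deleteNamespace fails while tables remain.
          for (TableName table : admin.listTableNamesByNamespace("TestNs")) {
            admin.disableTable(table);
            admin.deleteTable(table);
          }
          admin.deleteNamespace("TestNs");
        }
      }
    }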
2024-12-09T16:56:35,027 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733763395027"}]},"ts":"9223372036854775807"} 2024-12-09T16:56:35,027 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733763395027"}]},"ts":"9223372036854775807"} 2024-12-09T16:56:35,029 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-09T16:56:35,030 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => c9cfeb687a0dd64ffa1b3e9c8c9d924c, NAME => 'TestNs:TestTable,,1733763385234.c9cfeb687a0dd64ffa1b3e9c8c9d924c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 434d27bd1b99346c64f656cd110c7695, NAME => 'TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695.', STARTKEY => '1', ENDKEY => ''}] 2024-12-09T16:56:35,030 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestNs:TestTable' as deleted. 2024-12-09T16:56:35,030 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733763395030"}]},"ts":"9223372036854775807"} 2024-12-09T16:56:35,032 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table TestNs:TestTable state from META 2024-12-09T16:56:35,033 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=43, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-12-09T16:56:35,034 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestNs:TestTable in 39 msec 2024-12-09T16:56:35,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=43 2024-12-09T16:56:35,263 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestNs:TestTable 2024-12-09T16:56:35,263 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: TestNs:TestTable completed 2024-12-09T16:56:35,266 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.HMaster$20(3601): Client=jenkins//172.17.0.2 delete TestNs 2024-12-09T16:56:35,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_NAMESPACE_PREPARE, hasLock=false; DeleteNamespaceProcedure, namespace=TestNs 2024-12-09T16:56:35,272 INFO [PEWorker-2 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_PREPARE, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-12-09T16:56:35,274 INFO [PEWorker-2 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_DELETE_FROM_NS_TABLE, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-12-09T16:56:35,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-09T16:56:35,277 INFO [PEWorker-2 {}] procedure.DeleteNamespaceProcedure(67): pid=44, 
state=RUNNABLE:DELETE_NAMESPACE_DELETE_DIRECTORIES, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-12-09T16:56:35,280 INFO [PEWorker-2 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-12-09T16:56:35,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteNamespaceProcedure, namespace=TestNs in 14 msec 2024-12-09T16:56:35,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36647 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-09T16:56:35,534 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$NamespaceProcedureBiConsumer(2745): Operation: DELETE_NAMESPACE, Namespace: TestNs completed 2024-12-09T16:56:35,534 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T16:56:35,534 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T16:56:35,535 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.tearDownAfterClass(TestClusterScopeQuotaThrottle.java:107) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T16:56:35,543 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:56:35,544 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:56:35,544 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T16:56:35,544 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T16:56:35,544 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1143773905, stopped=false 2024-12-09T16:56:35,545 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.quotas.MasterQuotasObserver 2024-12-09T16:56:35,545 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7b4ec1980089,36647,1733763376785 2024-12-09T16:56:35,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T16:56:35,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T16:56:35,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T16:56:35,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:35,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:35,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:35,621 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T16:56:35,622 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
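
The call stack logged just above shows the teardown entry point for this run: `TestClusterScopeQuotaThrottle.tearDownAfterClass` closing the cluster connection via `HBaseTestingUtil.shutdownMiniCluster`, invoked through JUnit's `RunAfters`. A minimal sketch of a teardown of that shape, assuming a shared `HBaseTestingUtil` field named `UTIL` (a common convention, not taken from this log):

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterTeardownSketch {
  // Assumed shared test utility; the real test wires this up in its own setup.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Two region servers, matching the two RS threads (RS:0 and RS:1) seen later in the log.
    UTIL.startMiniCluster(2);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Closes the async connection and stops master + region servers,
    // producing the "Shutting down minicluster" lines recorded above.
    UTIL.shutdownMiniCluster();
  }
}
```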
2024-12-09T16:56:35,623 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:56:35,623 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:56:35,623 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.tearDownAfterClass(TestClusterScopeQuotaThrottle.java:107) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T16:56:35,623 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:56:35,623 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:56:35,624 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7b4ec1980089,34213,1733763377986' ***** 2024-12-09T16:56:35,624 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T16:56:35,624 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '7b4ec1980089,36153,1733763378299' ***** 2024-12-09T16:56:35,625 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T16:56:35,625 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T16:56:35,625 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T16:56:35,625 INFO [RS:0;7b4ec1980089:34213 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T16:56:35,625 INFO [RS:1;7b4ec1980089:36153 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T16:56:35,625 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T16:56:35,625 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T16:56:35,626 INFO [RS:0;7b4ec1980089:34213 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T16:56:35,626 INFO [RS:1;7b4ec1980089:36153 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T16:56:35,626 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(3091): Received CLOSE for 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:35,626 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(959): stopping server 7b4ec1980089,36153,1733763378299 2024-12-09T16:56:35,626 INFO [RS:1;7b4ec1980089:36153 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T16:56:35,626 INFO [RS:1;7b4ec1980089:36153 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;7b4ec1980089:36153. 
2024-12-09T16:56:35,626 DEBUG [RS:1;7b4ec1980089:36153 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T16:56:35,626 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(959): stopping server 7b4ec1980089,34213,1733763377986 2024-12-09T16:56:35,626 DEBUG [RS:1;7b4ec1980089:36153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:56:35,626 INFO [RS:0;7b4ec1980089:34213 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T16:56:35,627 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T16:56:35,627 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T16:56:35,627 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T16:56:35,627 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T16:56:35,627 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 59396aaaa39ec96658e63a448b643a55, disabling compactions & flushes 2024-12-09T16:56:35,627 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 2024-12-09T16:56:35,627 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 2024-12-09T16:56:35,627 INFO [RS:0;7b4ec1980089:34213 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7b4ec1980089:34213. 2024-12-09T16:56:35,627 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 
after waiting 0 ms 2024-12-09T16:56:35,627 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 2024-12-09T16:56:35,627 DEBUG [RS:0;7b4ec1980089:34213 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T16:56:35,627 DEBUG [RS:0;7b4ec1980089:34213 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:56:35,627 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T16:56:35,627 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 59396aaaa39ec96658e63a448b643a55 2/2 column families, dataSize=726 B heapSize=2.44 KB 2024-12-09T16:56:35,627 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T16:56:35,627 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T16:56:35,628 DEBUG [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T16:56:35,628 INFO [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T16:56:35,628 DEBUG [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1325): Online Regions={59396aaaa39ec96658e63a448b643a55=hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55.} 2024-12-09T16:56:35,628 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T16:56:35,628 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T16:56:35,628 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T16:56:35,628 
DEBUG [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T16:56:35,628 DEBUG [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1351): Waiting on 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:35,628 INFO [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=13.33 KB heapSize=24.55 KB 2024-12-09T16:56:35,630 INFO [regionserver/7b4ec1980089:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T16:56:35,633 INFO [regionserver/7b4ec1980089:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T16:56:35,653 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/.tmp/info/25afff0f5e034219964066a9578a8323 is 135, key is hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55./info:regioninfo/1733763382343/Put/seqid=0 2024-12-09T16:56:35,654 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55/.tmp/q/21c5be80a2074a149f50aa3eb97904b2 is 44, key is u.jenkins/q:s.default:/1733763388342/DeleteColumn/seqid=0 2024-12-09T16:56:35,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741848_1024 (size=7362) 2024-12-09T16:56:35,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741848_1024 (size=7362) 2024-12-09T16:56:35,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741849_1025 (size=5302) 2024-12-09T16:56:35,662 INFO [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.80 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/.tmp/info/25afff0f5e034219964066a9578a8323 2024-12-09T16:56:35,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741849_1025 (size=5302) 2024-12-09T16:56:35,685 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/.tmp/ns/c81b64ea15044ce393e747aae30cf183 is 92, key is TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695./ns:/1733763395020/DeleteFamily/seqid=0 2024-12-09T16:56:35,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741850_1026 (size=5710) 2024-12-09T16:56:35,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741850_1026 (size=5710) 2024-12-09T16:56:35,691 INFO [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=572 B at 
sequenceid=65 (bloomFilter=true), to=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/.tmp/ns/c81b64ea15044ce393e747aae30cf183 2024-12-09T16:56:35,711 INFO [regionserver/7b4ec1980089:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T16:56:35,711 INFO [regionserver/7b4ec1980089:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T16:56:35,715 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/.tmp/rep_barrier/dfcf6d8d86de483aab560231dfe5ceee is 101, key is TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695./rep_barrier:/1733763395020/DeleteFamily/seqid=0 2024-12-09T16:56:35,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741851_1027 (size=5823) 2024-12-09T16:56:35,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741851_1027 (size=5823) 2024-12-09T16:56:35,722 INFO [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=515 B at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/.tmp/rep_barrier/dfcf6d8d86de483aab560231dfe5ceee 2024-12-09T16:56:35,745 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/.tmp/table/bae9e4f93bb648e6947c0a55bcf692ea is 95, key is TestNs:TestTable,1,1733763385234.434d27bd1b99346c64f656cd110c7695./table:/1733763395020/DeleteFamily/seqid=0 2024-12-09T16:56:35,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741852_1028 (size=5966) 2024-12-09T16:56:35,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741852_1028 (size=5966) 2024-12-09T16:56:35,762 INFO [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.47 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/.tmp/table/bae9e4f93bb648e6947c0a55bcf692ea 2024-12-09T16:56:35,773 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/.tmp/info/25afff0f5e034219964066a9578a8323 as hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/info/25afff0f5e034219964066a9578a8323 2024-12-09T16:56:35,776 INFO [regionserver/7b4ec1980089:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T16:56:35,777 INFO [regionserver/7b4ec1980089:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T16:56:35,783 INFO 
[RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/info/25afff0f5e034219964066a9578a8323, entries=21, sequenceid=65, filesize=7.2 K 2024-12-09T16:56:35,783 INFO [regionserver/7b4ec1980089:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: SpaceQuotaRefresherChore was stopped 2024-12-09T16:56:35,785 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/.tmp/ns/c81b64ea15044ce393e747aae30cf183 as hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/ns/c81b64ea15044ce393e747aae30cf183 2024-12-09T16:56:35,786 INFO [regionserver/7b4ec1980089:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: SpaceQuotaRefresherChore was stopped 2024-12-09T16:56:35,794 INFO [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/ns/c81b64ea15044ce393e747aae30cf183, entries=8, sequenceid=65, filesize=5.6 K 2024-12-09T16:56:35,795 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/.tmp/rep_barrier/dfcf6d8d86de483aab560231dfe5ceee as hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/rep_barrier/dfcf6d8d86de483aab560231dfe5ceee 2024-12-09T16:56:35,803 INFO [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/rep_barrier/dfcf6d8d86de483aab560231dfe5ceee, entries=6, sequenceid=65, filesize=5.7 K 2024-12-09T16:56:35,805 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/.tmp/table/bae9e4f93bb648e6947c0a55bcf692ea as hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/table/bae9e4f93bb648e6947c0a55bcf692ea 2024-12-09T16:56:35,813 INFO [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/table/bae9e4f93bb648e6947c0a55bcf692ea, entries=12, sequenceid=65, filesize=5.8 K 2024-12-09T16:56:35,814 INFO [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~13.33 KB/13653, heapSize ~24.48 KB/25072, currentSize=0 B/0 for 1588230740 in 186ms, sequenceid=65, compaction requested=false 2024-12-09T16:56:35,824 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/meta/1588230740/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-12-09T16:56:35,825 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T16:56:35,825 INFO [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T16:56:35,825 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733763395627Running coprocessor pre-close hooks at 1733763395627Disabling compacts and flushes for region at 1733763395627Disabling writes for close at 1733763395628 (+1 ms)Obtaining lock to block concurrent updates at 1733763395628Preparing flush snapshotting stores in 1588230740 at 1733763395628Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=13653, getHeapSize=25072, getOffHeapSize=0, getCellsCount=139 at 1733763395629 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733763395629Flushing 1588230740/info: creating writer at 1733763395630 (+1 ms)Flushing 1588230740/info: appending metadata at 1733763395647 (+17 ms)Flushing 1588230740/info: closing flushed file at 1733763395648 (+1 ms)Flushing 1588230740/ns: creating writer at 1733763395670 (+22 ms)Flushing 1588230740/ns: appending metadata at 1733763395684 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733763395684Flushing 1588230740/rep_barrier: creating writer at 1733763395699 (+15 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733763395715 (+16 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733763395715Flushing 1588230740/table: creating writer at 1733763395729 (+14 ms)Flushing 1588230740/table: appending metadata at 1733763395745 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733763395745Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ab12d65: reopening flushed file at 1733763395772 (+27 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a110085: reopening flushed file at 1733763395783 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37687cf7: reopening flushed file at 1733763395794 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ba7342b: reopening flushed file at 1733763395803 (+9 ms)Finished flush of dataSize ~13.33 KB/13653, heapSize ~24.48 KB/25072, currentSize=0 B/0 for 1588230740 in 186ms, sequenceid=65, compaction requested=false at 1733763395814 (+11 ms)Writing region close event to WAL at 1733763395819 (+5 ms)Running coprocessor post-close hooks at 1733763395825 (+6 ms)Closed at 1733763395825 2024-12-09T16:56:35,825 DEBUG [RS_CLOSE_META-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T16:56:35,828 DEBUG [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1351): Waiting on 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:35,828 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(976): stopping server 7b4ec1980089,36153,1733763378299; all regions closed. 2024-12-09T16:56:35,829 DEBUG [RS:1;7b4ec1980089:36153 {}] quotas.QuotaCache(122): Stopping QuotaRefresherChore chore. 
2024-12-09T16:56:35,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741835_1011 (size=17505) 2024-12-09T16:56:35,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741835_1011 (size=17505) 2024-12-09T16:56:35,837 DEBUG [RS:1;7b4ec1980089:36153 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/oldWALs 2024-12-09T16:56:35,837 INFO [RS:1;7b4ec1980089:36153 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 7b4ec1980089%2C36153%2C1733763378299.meta:.meta(num 1733763381354) 2024-12-09T16:56:35,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741834_1010 (size=1253) 2024-12-09T16:56:35,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741834_1010 (size=1253) 2024-12-09T16:56:35,842 DEBUG [RS:1;7b4ec1980089:36153 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/oldWALs 2024-12-09T16:56:35,842 INFO [RS:1;7b4ec1980089:36153 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 7b4ec1980089%2C36153%2C1733763378299:(num 1733763380901) 2024-12-09T16:56:35,843 DEBUG [RS:1;7b4ec1980089:36153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:56:35,843 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T16:56:35,843 INFO [RS:1;7b4ec1980089:36153 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T16:56:35,843 INFO [RS:1;7b4ec1980089:36153 {}] hbase.ChoreService(370): Chore service for: regionserver/7b4ec1980089:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T16:56:35,843 INFO [RS:1;7b4ec1980089:36153 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T16:56:35,843 INFO [regionserver/7b4ec1980089:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T16:56:35,843 INFO [RS:1;7b4ec1980089:36153 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36153 2024-12-09T16:56:35,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T16:56:35,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7b4ec1980089,36153,1733763378299 2024-12-09T16:56:35,874 INFO [RS:1;7b4ec1980089:36153 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T16:56:35,884 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7b4ec1980089,36153,1733763378299] 2024-12-09T16:56:35,892 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7b4ec1980089,36153,1733763378299 already deleted, retry=false 2024-12-09T16:56:35,892 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7b4ec1980089,36153,1733763378299 expired; onlineServers=1 2024-12-09T16:56:35,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T16:56:35,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36153-0x1000b8f605d0002, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T16:56:35,985 INFO [RS:1;7b4ec1980089:36153 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T16:56:35,985 INFO [RS:1;7b4ec1980089:36153 {}] regionserver.HRegionServer(1031): Exiting; stopping=7b4ec1980089,36153,1733763378299; zookeeper connection closed. 
2024-12-09T16:56:35,985 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5d2cd812 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5d2cd812 2024-12-09T16:56:36,029 DEBUG [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1351): Waiting on 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:36,064 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=597 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55/.tmp/q/21c5be80a2074a149f50aa3eb97904b2 2024-12-09T16:56:36,077 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 21c5be80a2074a149f50aa3eb97904b2 2024-12-09T16:56:36,093 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55/.tmp/u/681e769bfc7348d1bdef15e3540aab97 is 43, key is t.TestNs:TestTable/u:/1733763390997/DeleteFamily/seqid=0 2024-12-09T16:56:36,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741853_1029 (size=5250) 2024-12-09T16:56:36,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741853_1029 (size=5250) 2024-12-09T16:56:36,229 DEBUG [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1351): Waiting on 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:36,429 DEBUG [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1351): Waiting on 59396aaaa39ec96658e63a448b643a55 2024-12-09T16:56:36,500 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=129 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55/.tmp/u/681e769bfc7348d1bdef15e3540aab97 2024-12-09T16:56:36,513 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 681e769bfc7348d1bdef15e3540aab97 2024-12-09T16:56:36,514 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55/.tmp/q/21c5be80a2074a149f50aa3eb97904b2 as hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55/q/21c5be80a2074a149f50aa3eb97904b2 2024-12-09T16:56:36,521 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 21c5be80a2074a149f50aa3eb97904b2 2024-12-09T16:56:36,521 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55/q/21c5be80a2074a149f50aa3eb97904b2, entries=5, sequenceid=17, filesize=5.2 K 2024-12-09T16:56:36,522 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55/.tmp/u/681e769bfc7348d1bdef15e3540aab97 as hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55/u/681e769bfc7348d1bdef15e3540aab97 2024-12-09T16:56:36,530 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 681e769bfc7348d1bdef15e3540aab97 2024-12-09T16:56:36,530 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55/u/681e769bfc7348d1bdef15e3540aab97, entries=4, sequenceid=17, filesize=5.1 K 2024-12-09T16:56:36,531 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~726 B/726, heapSize ~2.41 KB/2464, currentSize=0 B/0 for 59396aaaa39ec96658e63a448b643a55 in 904ms, sequenceid=17, compaction requested=false 2024-12-09T16:56:36,537 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/data/hbase/quota/59396aaaa39ec96658e63a448b643a55/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=1 2024-12-09T16:56:36,537 INFO [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 2024-12-09T16:56:36,537 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 59396aaaa39ec96658e63a448b643a55: Waiting for close lock at 1733763395626Running coprocessor pre-close hooks at 1733763395627 (+1 ms)Disabling compacts and flushes for region at 1733763395627Disabling writes for close at 1733763395627Obtaining lock to block concurrent updates at 1733763395628 (+1 ms)Preparing flush snapshotting stores in 59396aaaa39ec96658e63a448b643a55 at 1733763395628Finished memstore snapshotting hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55., syncing WAL and waiting on mvcc, flushsize=dataSize=726, getHeapSize=2464, getOffHeapSize=0, getCellsCount=17 at 1733763395628Flushing stores of hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 
at 1733763395629 (+1 ms)Flushing 59396aaaa39ec96658e63a448b643a55/q: creating writer at 1733763395629Flushing 59396aaaa39ec96658e63a448b643a55/q: appending metadata at 1733763395648 (+19 ms)Flushing 59396aaaa39ec96658e63a448b643a55/q: closing flushed file at 1733763395649 (+1 ms)Flushing 59396aaaa39ec96658e63a448b643a55/u: creating writer at 1733763396077 (+428 ms)Flushing 59396aaaa39ec96658e63a448b643a55/u: appending metadata at 1733763396092 (+15 ms)Flushing 59396aaaa39ec96658e63a448b643a55/u: closing flushed file at 1733763396092Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f35ff68: reopening flushed file at 1733763396513 (+421 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@111c41de: reopening flushed file at 1733763396521 (+8 ms)Finished flush of dataSize ~726 B/726, heapSize ~2.41 KB/2464, currentSize=0 B/0 for 59396aaaa39ec96658e63a448b643a55 in 904ms, sequenceid=17, compaction requested=false at 1733763396531 (+10 ms)Writing region close event to WAL at 1733763396532 (+1 ms)Running coprocessor post-close hooks at 1733763396537 (+5 ms)Closed at 1733763396537 2024-12-09T16:56:36,538 DEBUG [RS_CLOSE_REGION-regionserver/7b4ec1980089:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:quota,,1733763381716.59396aaaa39ec96658e63a448b643a55. 2024-12-09T16:56:36,630 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(976): stopping server 7b4ec1980089,34213,1733763377986; all regions closed. 2024-12-09T16:56:36,630 DEBUG [RS:0;7b4ec1980089:34213 {}] quotas.QuotaCache(122): Stopping QuotaRefresherChore chore. 2024-12-09T16:56:36,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741833_1009 (size=6729) 2024-12-09T16:56:36,633 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/WALs/7b4ec1980089,34213,1733763377986/7b4ec1980089%2C34213%2C1733763377986.1733763380896 not finished, retry = 0 2024-12-09T16:56:36,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741833_1009 (size=6729) 2024-12-09T16:56:36,737 DEBUG [RS:0;7b4ec1980089:34213 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/oldWALs 2024-12-09T16:56:36,737 INFO [RS:0;7b4ec1980089:34213 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 7b4ec1980089%2C34213%2C1733763377986:(num 1733763380896) 2024-12-09T16:56:36,738 DEBUG [RS:0;7b4ec1980089:34213 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:56:36,738 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T16:56:36,738 INFO [RS:0;7b4ec1980089:34213 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T16:56:36,738 INFO [RS:0;7b4ec1980089:34213 {}] hbase.ChoreService(370): Chore service for: regionserver/7b4ec1980089:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T16:56:36,738 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
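
The q/u cells flushed from hbase:quota during the region close above (a DeleteColumn under row `u.jenkins` and a DeleteFamily under row `t.TestNs:TestTable`) are the tombstones left by the throttle settings the test removed earlier. A hedged sketch of how such throttles are typically set and cleared through the standard quota client API (class and method names are the public API, not calls taken from this log):

```java
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class QuotaCleanupSketch {
  // Assumed: "admin" is an open Admin handle; user and table names mirror the log.
  static void setAndClearThrottles(Admin admin) throws Exception {
    TableName tn = TableName.valueOf("TestNs", "TestTable");

    // Setting throttles writes rows such as "u.jenkins" / "t.TestNs:TestTable" into hbase:quota...
    admin.setQuota(QuotaSettingsFactory.throttleUser("jenkins",
        ThrottleType.REQUEST_NUMBER, 10, TimeUnit.SECONDS));
    admin.setQuota(QuotaSettingsFactory.throttleTable(tn,
        ThrottleType.REQUEST_NUMBER, 10, TimeUnit.SECONDS));

    // ...and clearing them issues the deletes whose tombstones appear in the flush above.
    admin.setQuota(QuotaSettingsFactory.unthrottleUser("jenkins"));
    admin.setQuota(QuotaSettingsFactory.unthrottleTable(tn));
  }
}
```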
2024-12-09T16:56:36,738 INFO [regionserver/7b4ec1980089:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T16:56:36,738 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T16:56:36,738 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T16:56:36,738 INFO [RS:0;7b4ec1980089:34213 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T16:56:36,739 INFO [RS:0;7b4ec1980089:34213 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34213 2024-12-09T16:56:36,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7b4ec1980089,34213,1733763377986 2024-12-09T16:56:36,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T16:56:36,792 INFO [RS:0;7b4ec1980089:34213 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T16:56:36,800 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7b4ec1980089,34213,1733763377986] 2024-12-09T16:56:36,808 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7b4ec1980089,34213,1733763377986 already deleted, retry=false 2024-12-09T16:56:36,808 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7b4ec1980089,34213,1733763377986 expired; onlineServers=0 2024-12-09T16:56:36,809 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7b4ec1980089,36647,1733763376785' ***** 2024-12-09T16:56:36,809 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T16:56:36,809 INFO [M:0;7b4ec1980089:36647 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T16:56:36,809 INFO [M:0;7b4ec1980089:36647 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T16:56:36,809 DEBUG [M:0;7b4ec1980089:36647 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T16:56:36,810 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T16:56:36,810 DEBUG [M:0;7b4ec1980089:36647 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T16:56:36,810 DEBUG [master/7b4ec1980089:0:becomeActiveMaster-HFileCleaner.large.0-1733763380441 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b4ec1980089:0:becomeActiveMaster-HFileCleaner.large.0-1733763380441,5,FailOnTimeoutGroup] 2024-12-09T16:56:36,810 DEBUG [master/7b4ec1980089:0:becomeActiveMaster-HFileCleaner.small.0-1733763380448 {}] cleaner.HFileCleaner(306): Exit Thread[master/7b4ec1980089:0:becomeActiveMaster-HFileCleaner.small.0-1733763380448,5,FailOnTimeoutGroup] 2024-12-09T16:56:36,810 INFO [M:0;7b4ec1980089:36647 {}] hbase.ChoreService(370): Chore service for: master/7b4ec1980089:0 had [ScheduledChore name=QuotaObserverChore, period=60000, unit=MILLISECONDS, ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T16:56:36,810 INFO [M:0;7b4ec1980089:36647 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T16:56:36,811 DEBUG [M:0;7b4ec1980089:36647 {}] master.HMaster(1795): Stopping service threads 2024-12-09T16:56:36,811 INFO [M:0;7b4ec1980089:36647 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T16:56:36,811 INFO [M:0;7b4ec1980089:36647 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T16:56:36,812 ERROR [M:0;7b4ec1980089:36647 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-1,5,PEWorkerGroup] Thread[HFileArchiver-2,5,PEWorkerGroup] Thread[HFileArchiver-3,5,PEWorkerGroup] Thread[HFileArchiver-4,5,PEWorkerGroup] Thread[HFileArchiver-5,5,PEWorkerGroup] 2024-12-09T16:56:36,813 INFO [M:0;7b4ec1980089:36647 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T16:56:36,813 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-09T16:56:36,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T16:56:36,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:56:36,817 DEBUG [M:0;7b4ec1980089:36647 {}] zookeeper.ZKUtil(347): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T16:56:36,817 WARN [M:0;7b4ec1980089:36647 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T16:56:36,818 INFO [M:0;7b4ec1980089:36647 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/.lastflushedseqids 2024-12-09T16:56:36,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741854_1030 (size=134) 2024-12-09T16:56:36,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741854_1030 (size=134) 2024-12-09T16:56:36,829 INFO [M:0;7b4ec1980089:36647 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T16:56:36,830 INFO [M:0;7b4ec1980089:36647 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T16:56:36,830 DEBUG [M:0;7b4ec1980089:36647 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T16:56:36,830 INFO [M:0;7b4ec1980089:36647 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:56:36,830 DEBUG [M:0;7b4ec1980089:36647 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:56:36,830 DEBUG [M:0;7b4ec1980089:36647 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T16:56:36,830 DEBUG [M:0;7b4ec1980089:36647 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T16:56:36,830 INFO [M:0;7b4ec1980089:36647 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=157.71 KB heapSize=190.78 KB
2024-12-09T16:56:36,850 DEBUG [M:0;7b4ec1980089:36647 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4ff0570bd59c46d98bd6a1cb41ec8b59 is 82, key is hbase:meta,,1/info:regioninfo/1733763381453/Put/seqid=0
2024-12-09T16:56:36,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741855_1031 (size=5672)
2024-12-09T16:56:36,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741855_1031 (size=5672)
2024-12-09T16:56:36,857 INFO [M:0;7b4ec1980089:36647 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4ff0570bd59c46d98bd6a1cb41ec8b59
2024-12-09T16:56:36,885 DEBUG [M:0;7b4ec1980089:36647 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e71ca2987aa5479ebed07454f82ebcf9 is 958, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733763382380/Put/seqid=0
2024-12-09T16:56:36,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741856_1032 (size=13432)
2024-12-09T16:56:36,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741856_1032 (size=13432)
2024-12-09T16:56:36,892 INFO [M:0;7b4ec1980089:36647 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=157.09 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e71ca2987aa5479ebed07454f82ebcf9
2024-12-09T16:56:36,900 INFO [RS:0;7b4ec1980089:34213 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:56:36,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:56:36,900 INFO [RS:0;7b4ec1980089:34213 {}] regionserver.HRegionServer(1031): Exiting; stopping=7b4ec1980089,34213,1733763377986; zookeeper connection closed.
2024-12-09T16:56:36,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34213-0x1000b8f605d0001, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:56:36,901 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@604af764 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@604af764
2024-12-09T16:56:36,901 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete
2024-12-09T16:56:36,913 DEBUG [M:0;7b4ec1980089:36647 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/27ff54ec061248a3851b69e434fdc15d is 69, key is 7b4ec1980089,34213,1733763377986/rs:state/1733763380508/Put/seqid=0
2024-12-09T16:56:36,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741857_1033 (size=5224)
2024-12-09T16:56:36,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741857_1033 (size=5224)
2024-12-09T16:56:36,920 INFO [M:0;7b4ec1980089:36647 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/27ff54ec061248a3851b69e434fdc15d
2024-12-09T16:56:36,928 DEBUG [M:0;7b4ec1980089:36647 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4ff0570bd59c46d98bd6a1cb41ec8b59 as hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4ff0570bd59c46d98bd6a1cb41ec8b59
2024-12-09T16:56:36,936 INFO [M:0;7b4ec1980089:36647 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4ff0570bd59c46d98bd6a1cb41ec8b59, entries=8, sequenceid=375, filesize=5.5 K
2024-12-09T16:56:36,937 DEBUG [M:0;7b4ec1980089:36647 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e71ca2987aa5479ebed07454f82ebcf9 as hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e71ca2987aa5479ebed07454f82ebcf9
2024-12-09T16:56:36,944 INFO [M:0;7b4ec1980089:36647 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e71ca2987aa5479ebed07454f82ebcf9, entries=44, sequenceid=375, filesize=13.1 K
2024-12-09T16:56:36,945 DEBUG [M:0;7b4ec1980089:36647 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/27ff54ec061248a3851b69e434fdc15d as hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/27ff54ec061248a3851b69e434fdc15d
2024-12-09T16:56:36,953 INFO [M:0;7b4ec1980089:36647 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34305/user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/27ff54ec061248a3851b69e434fdc15d, entries=2, sequenceid=375, filesize=5.1 K
2024-12-09T16:56:36,954 INFO [M:0;7b4ec1980089:36647 {}] regionserver.HRegion(3140): Finished flush of dataSize ~157.71 KB/161496, heapSize ~190.48 KB/195056, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=375, compaction requested=false
2024-12-09T16:56:36,955 INFO [M:0;7b4ec1980089:36647 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:56:36,955 DEBUG [M:0;7b4ec1980089:36647 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733763396830Disabling compacts and flushes for region at 1733763396830Disabling writes for close at 1733763396830Obtaining lock to block concurrent updates at 1733763396830Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733763396830Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=161496, getHeapSize=195296, getOffHeapSize=0, getCellsCount=434 at 1733763396830Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733763396831 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733763396831Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733763396850 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733763396850Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733763396863 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733763396885 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733763396885Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733763396899 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733763396913 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733763396913Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5cf5cc2: reopening flushed file at 1733763396927 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2471e60a: reopening flushed file at 1733763396936 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a0532d7: reopening flushed file at 1733763396944 (+8 ms)Finished flush of dataSize ~157.71 KB/161496, heapSize ~190.48 KB/195056, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 124ms, sequenceid=375, compaction requested=false at 1733763396954 (+10 ms)Writing region close event to WAL at 1733763396955 (+1 ms)Closed at 1733763396955
2024-12-09T16:56:36,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39355 is added to blk_1073741830_1006 (size=186526)
2024-12-09T16:56:36,958 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/bc6d47bd-eecb-0546-f60d-99969a554cfd/MasterData/WALs/7b4ec1980089,36647,1733763376785/7b4ec1980089%2C36647%2C1733763376785.1733763379330 not finished, retry = 0
2024-12-09T16:56:36,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32855 is added to blk_1073741830_1006 (size=186526)
2024-12-09T16:56:37,060 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T16:56:37,060 INFO [M:0;7b4ec1980089:36647 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T16:56:37,060 INFO [M:0;7b4ec1980089:36647 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36647
2024-12-09T16:56:37,060 INFO [M:0;7b4ec1980089:36647 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T16:56:37,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:56:37,182 INFO [M:0;7b4ec1980089:36647 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:56:37,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36647-0x1000b8f605d0000, quorum=127.0.0.1:54803, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:56:37,235 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@546ff28f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:56:37,238 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1852a3e1{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:56:37,238 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:56:37,239 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ffa6420{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:56:37,239 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5547b1d3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/hadoop.log.dir/,STOPPED}
2024-12-09T16:56:37,244 WARN [BP-749996034-172.17.0.2-1733763370643 heartbeating to localhost/127.0.0.1:34305 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:56:37,244 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T16:56:37,244 WARN [BP-749996034-172.17.0.2-1733763370643 heartbeating to localhost/127.0.0.1:34305 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-749996034-172.17.0.2-1733763370643 (Datanode Uuid 60a74b0c-cca6-4717-9239-01383c2c5f4c) service to localhost/127.0.0.1:34305
2024-12-09T16:56:37,244 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:56:37,246 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/cluster_11d82abb-857b-0c7e-56cd-942d2bbc8a40/data/data3/current/BP-749996034-172.17.0.2-1733763370643 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:56:37,246 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/cluster_11d82abb-857b-0c7e-56cd-942d2bbc8a40/data/data4/current/BP-749996034-172.17.0.2-1733763370643 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:56:37,246 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:56:37,248 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f34895c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:56:37,248 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@695fa2ad{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:56:37,248 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:56:37,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b94fc88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:56:37,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cb3ceaf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/hadoop.log.dir/,STOPPED}
2024-12-09T16:56:37,250 WARN [BP-749996034-172.17.0.2-1733763370643 heartbeating to localhost/127.0.0.1:34305 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:56:37,250 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T16:56:37,250 WARN [BP-749996034-172.17.0.2-1733763370643 heartbeating to localhost/127.0.0.1:34305 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-749996034-172.17.0.2-1733763370643 (Datanode Uuid 33066ccb-b9e7-48ff-8d2e-71ab2517cb39) service to localhost/127.0.0.1:34305
2024-12-09T16:56:37,250 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:56:37,251 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/cluster_11d82abb-857b-0c7e-56cd-942d2bbc8a40/data/data1/current/BP-749996034-172.17.0.2-1733763370643 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:56:37,251 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/cluster_11d82abb-857b-0c7e-56cd-942d2bbc8a40/data/data2/current/BP-749996034-172.17.0.2-1733763370643 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:56:37,251 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:56:37,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64e450a9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T16:56:37,259 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f72973e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:56:37,259 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:56:37,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13ffc098{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:56:37,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dc391a7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/45b4b7c4-6fbe-05c7-b282-c80884884d21/hadoop.log.dir/,STOPPED}
2024-12-09T16:56:37,268 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T16:56:37,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down