2024-11-09 21:39:18,611 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-11-09 21:39:18,627 main DEBUG Took 0.013567 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-09 21:39:18,628 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-09 21:39:18,628 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-09 21:39:18,629 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-09 21:39:18,631 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 21:39:18,645 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-09 21:39:18,662 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,664 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 21:39:18,665 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,665 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 21:39:18,666 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,667 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 21:39:18,668 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,668 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 21:39:18,669 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,670 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 21:39:18,671 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,671 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 21:39:18,672 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,673 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-09 21:39:18,674 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,674 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 21:39:18,675 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,675 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 21:39:18,676 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,676 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 21:39:18,677 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,677 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 21:39:18,678 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,678 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 21:39:18,679 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,680 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-09 21:39:18,681 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 21:39:18,683 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-09 21:39:18,686 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-09 21:39:18,687 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-09 21:39:18,688 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-09 21:39:18,689 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-09 21:39:18,699 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-09 21:39:18,702 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-09 21:39:18,705 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-09 21:39:18,705 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-09 21:39:18,706 main DEBUG createAppenders(={Console}) 2024-11-09 21:39:18,707 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a initialized 2024-11-09 21:39:18,707 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-11-09 21:39:18,708 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a OK. 2024-11-09 21:39:18,709 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-09 21:39:18,709 main DEBUG OutputStream closed 2024-11-09 21:39:18,709 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-09 21:39:18,710 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-09 21:39:18,710 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@21e360a OK 2024-11-09 21:39:18,802 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-09 21:39:18,804 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-09 21:39:18,805 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-09 21:39:18,807 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-09 21:39:18,807 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-09 21:39:18,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-09 21:39:18,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-09 21:39:18,808 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-09 21:39:18,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-09 21:39:18,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-09 21:39:18,810 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-09 21:39:18,810 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-09 21:39:18,811 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-09 21:39:18,811 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-09 21:39:18,811 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-09 21:39:18,812 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-09 21:39:18,812 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-09 21:39:18,813 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-09 21:39:18,816 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-09 21:39:18,816 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@42b02722) with optional ClassLoader: null 2024-11-09 21:39:18,817 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-09 21:39:18,817 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@42b02722] started OK. 2024-11-09T21:39:18,833 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle timeout: 13 mins 2024-11-09 21:39:18,836 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-09 21:39:18,837 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-09T21:39:19,143 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3 2024-11-09T21:39:19,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=2, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-09T21:39:19,181 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/cluster_719ad75b-0962-1c01-5841-5a2981114368, deleteOnExit=true 2024-11-09T21:39:19,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-09T21:39:19,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/test.cache.data in system properties and HBase conf 2024-11-09T21:39:19,183 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/hadoop.tmp.dir in system properties and HBase conf 2024-11-09T21:39:19,183 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/hadoop.log.dir in system properties and HBase conf 2024-11-09T21:39:19,184 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-09T21:39:19,185 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-09T21:39:19,185 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-09T21:39:19,282 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-09T21:39:19,399 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-09T21:39:19,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-09T21:39:19,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-09T21:39:19,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-09T21:39:19,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T21:39:19,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-09T21:39:19,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-09T21:39:19,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T21:39:19,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T21:39:19,411 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-09T21:39:19,411 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/nfs.dump.dir in system properties and HBase conf 2024-11-09T21:39:19,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/java.io.tmpdir in system properties and HBase conf 2024-11-09T21:39:19,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T21:39:19,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-09T21:39:19,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-09T21:39:20,497 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-09T21:39:20,578 INFO [Time-limited test {}] log.Log(170): Logging initialized @2724ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-09T21:39:20,653 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T21:39:20,725 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T21:39:20,749 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T21:39:20,750 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T21:39:20,751 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T21:39:20,765 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T21:39:20,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@73ed0cb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/hadoop.log.dir/,AVAILABLE} 2024-11-09T21:39:20,770 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@663b1cba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T21:39:20,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70f1962d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/java.io.tmpdir/jetty-localhost-35043-hadoop-hdfs-3_4_1-tests_jar-_-any-15491657621121278874/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T21:39:20,994 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c0994b2{HTTP/1.1, (http/1.1)}{localhost:35043} 2024-11-09T21:39:20,994 INFO [Time-limited test {}] server.Server(415): Started @3141ms 2024-11-09T21:39:21,564 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T21:39:21,576 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T21:39:21,594 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T21:39:21,594 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T21:39:21,594 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T21:39:21,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@180f6c17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/hadoop.log.dir/,AVAILABLE} 2024-11-09T21:39:21,597 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3768391{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T21:39:21,724 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c7e6768{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/java.io.tmpdir/jetty-localhost-45095-hadoop-hdfs-3_4_1-tests_jar-_-any-7851820415546264487/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T21:39:21,725 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@2c8e8509{HTTP/1.1, (http/1.1)}{localhost:45095} 2024-11-09T21:39:21,725 INFO [Time-limited test {}] server.Server(415): Started @3872ms 2024-11-09T21:39:21,790 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T21:39:21,920 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T21:39:21,928 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T21:39:21,933 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T21:39:21,933 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T21:39:21,934 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T21:39:21,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@267ab2dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/hadoop.log.dir/,AVAILABLE} 2024-11-09T21:39:21,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46797e8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T21:39:22,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2cddf455{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/java.io.tmpdir/jetty-localhost-34763-hadoop-hdfs-3_4_1-tests_jar-_-any-8812487297197236802/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T21:39:22,060 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f938ea2{HTTP/1.1, (http/1.1)}{localhost:34763} 2024-11-09T21:39:22,060 INFO [Time-limited test {}] server.Server(415): Started @4207ms 2024-11-09T21:39:22,062 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-09T21:39:23,109 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/cluster_719ad75b-0962-1c01-5841-5a2981114368/data/data3/current/BP-1298723039-172.17.0.2-1731188360083/current, will proceed with Du for space computation calculation, 2024-11-09T21:39:23,112 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/cluster_719ad75b-0962-1c01-5841-5a2981114368/data/data4/current/BP-1298723039-172.17.0.2-1731188360083/current, will proceed with Du for space computation calculation, 2024-11-09T21:39:23,116 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/cluster_719ad75b-0962-1c01-5841-5a2981114368/data/data1/current/BP-1298723039-172.17.0.2-1731188360083/current, will proceed with Du for space computation calculation, 2024-11-09T21:39:23,127 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/cluster_719ad75b-0962-1c01-5841-5a2981114368/data/data2/current/BP-1298723039-172.17.0.2-1731188360083/current, will proceed with Du for space computation calculation, 2024-11-09T21:39:23,169 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T21:39:23,170 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T21:39:23,225 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x696a1c43194575a1 with lease ID 0xd7cbca1d552a9a41: Processing first storage report for DS-d7d2d3da-56dc-4bca-8374-50a5e0a1d980 from datanode DatanodeRegistration(127.0.0.1:45239, datanodeUuid=73d4c1be-99df-4545-9076-1ee81d27db8a, infoPort=39887, infoSecurePort=0, ipcPort=34437, storageInfo=lv=-57;cid=testClusterID;nsid=653081868;c=1731188360083) 2024-11-09T21:39:23,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x696a1c43194575a1 with lease ID 0xd7cbca1d552a9a41: from storage DS-d7d2d3da-56dc-4bca-8374-50a5e0a1d980 node DatanodeRegistration(127.0.0.1:45239, datanodeUuid=73d4c1be-99df-4545-9076-1ee81d27db8a, infoPort=39887, infoSecurePort=0, ipcPort=34437, storageInfo=lv=-57;cid=testClusterID;nsid=653081868;c=1731188360083), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-09T21:39:23,227 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59d20b5699957b9b with lease ID 0xd7cbca1d552a9a42: Processing first storage report for DS-664b57a7-4cc1-41f5-8c8e-a968aff5a09d from datanode DatanodeRegistration(127.0.0.1:41017, datanodeUuid=9b4afbb2-eba7-480a-8aee-bbc469ac45d1, infoPort=32863, infoSecurePort=0, ipcPort=43973, storageInfo=lv=-57;cid=testClusterID;nsid=653081868;c=1731188360083) 2024-11-09T21:39:23,227 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59d20b5699957b9b with lease ID 0xd7cbca1d552a9a42: from storage DS-664b57a7-4cc1-41f5-8c8e-a968aff5a09d node DatanodeRegistration(127.0.0.1:41017, datanodeUuid=9b4afbb2-eba7-480a-8aee-bbc469ac45d1, infoPort=32863, infoSecurePort=0, ipcPort=43973, storageInfo=lv=-57;cid=testClusterID;nsid=653081868;c=1731188360083), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T21:39:23,228 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x696a1c43194575a1 with lease ID 0xd7cbca1d552a9a41: Processing first storage report for DS-3d7bbb29-c917-41e2-bf17-e93fd56c32c1 from datanode DatanodeRegistration(127.0.0.1:45239, datanodeUuid=73d4c1be-99df-4545-9076-1ee81d27db8a, infoPort=39887, infoSecurePort=0, ipcPort=34437, storageInfo=lv=-57;cid=testClusterID;nsid=653081868;c=1731188360083) 2024-11-09T21:39:23,228 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x696a1c43194575a1 with lease ID 0xd7cbca1d552a9a41: from storage DS-3d7bbb29-c917-41e2-bf17-e93fd56c32c1 node DatanodeRegistration(127.0.0.1:45239, datanodeUuid=73d4c1be-99df-4545-9076-1ee81d27db8a, infoPort=39887, infoSecurePort=0, ipcPort=34437, storageInfo=lv=-57;cid=testClusterID;nsid=653081868;c=1731188360083), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T21:39:23,228 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59d20b5699957b9b with lease ID 0xd7cbca1d552a9a42: Processing first storage report for DS-753e7fe2-9455-462c-ba2d-bcac6e97d279 from datanode DatanodeRegistration(127.0.0.1:41017, datanodeUuid=9b4afbb2-eba7-480a-8aee-bbc469ac45d1, infoPort=32863, infoSecurePort=0, ipcPort=43973, storageInfo=lv=-57;cid=testClusterID;nsid=653081868;c=1731188360083) 2024-11-09T21:39:23,228 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x59d20b5699957b9b with lease ID 0xd7cbca1d552a9a42: from storage DS-753e7fe2-9455-462c-ba2d-bcac6e97d279 node DatanodeRegistration(127.0.0.1:41017, datanodeUuid=9b4afbb2-eba7-480a-8aee-bbc469ac45d1, infoPort=32863, infoSecurePort=0, ipcPort=43973, storageInfo=lv=-57;cid=testClusterID;nsid=653081868;c=1731188360083), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T21:39:23,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3 2024-11-09T21:39:23,385 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/cluster_719ad75b-0962-1c01-5841-5a2981114368/zookeeper_0, clientPort=59566, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/cluster_719ad75b-0962-1c01-5841-5a2981114368/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/cluster_719ad75b-0962-1c01-5841-5a2981114368/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-09T21:39:23,394 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59566 2024-11-09T21:39:23,404 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T21:39:23,406 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T21:39:23,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741825_1001 (size=7) 2024-11-09T21:39:23,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741825_1001 (size=7) 2024-11-09T21:39:24,038 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b with version=8 2024-11-09T21:39:24,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/hbase-staging 2024-11-09T21:39:24,127 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-09T21:39:24,363 INFO [Time-limited test {}] client.ConnectionUtils(128): master/d86ff69d6719:0 server-side Connection retries=45 2024-11-09T21:39:24,371 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T21:39:24,372 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T21:39:24,377 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T21:39:24,377 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T21:39:24,378 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T21:39:24,513 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-09T21:39:24,567 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-09T21:39:24,576 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-09T21:39:24,579 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T21:39:24,605 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 32584 (auto-detected) 2024-11-09T21:39:24,606 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-09T21:39:24,625 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32969 2024-11-09T21:39:24,646 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32969 connecting to ZooKeeper ensemble=127.0.0.1:59566 2024-11-09T21:39:24,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:329690x0, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T21:39:24,762 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32969-0x10122d3a32c0000 connected 2024-11-09T21:39:24,838 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T21:39:24,840 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T21:39:24,849 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T21:39:24,853 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b, hbase.cluster.distributed=false 2024-11-09T21:39:24,877 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T21:39:24,882 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32969 
2024-11-09T21:39:24,883 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32969 2024-11-09T21:39:24,883 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32969 2024-11-09T21:39:24,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32969 2024-11-09T21:39:24,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32969 2024-11-09T21:39:24,983 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d86ff69d6719:0 server-side Connection retries=45 2024-11-09T21:39:24,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T21:39:24,985 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T21:39:24,985 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T21:39:24,986 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T21:39:24,986 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T21:39:24,989 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T21:39:24,992 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T21:39:24,993 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45707 2024-11-09T21:39:24,995 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45707 connecting to ZooKeeper ensemble=127.0.0.1:59566 2024-11-09T21:39:24,996 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T21:39:25,001 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T21:39:25,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:457070x0, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T21:39:25,021 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:457070x0, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T21:39:25,021 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:45707-0x10122d3a32c0001 connected 2024-11-09T21:39:25,025 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T21:39:25,031 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T21:39:25,034 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T21:39:25,039 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T21:39:25,039 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45707 2024-11-09T21:39:25,040 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45707 2024-11-09T21:39:25,040 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45707 2024-11-09T21:39:25,041 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45707 2024-11-09T21:39:25,041 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45707 2024-11-09T21:39:25,056 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/d86ff69d6719:0 server-side Connection retries=45 2024-11-09T21:39:25,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T21:39:25,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T21:39:25,057 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T21:39:25,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T21:39:25,057 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T21:39:25,057 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T21:39:25,058 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T21:39:25,058 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45593 2024-11-09T21:39:25,060 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45593 connecting to ZooKeeper ensemble=127.0.0.1:59566 2024-11-09T21:39:25,061 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T21:39:25,063 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T21:39:25,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455930x0, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T21:39:25,077 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:455930x0, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T21:39:25,077 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45593-0x10122d3a32c0002 connected 2024-11-09T21:39:25,078 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T21:39:25,079 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T21:39:25,080 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T21:39:25,081 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T21:39:25,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45593 2024-11-09T21:39:25,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45593 2024-11-09T21:39:25,082 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45593 2024-11-09T21:39:25,083 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45593 2024-11-09T21:39:25,083 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45593 2024-11-09T21:39:25,097 DEBUG [M:0;d86ff69d6719:32969 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;d86ff69d6719:32969 2024-11-09T21:39:25,098 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/d86ff69d6719,32969,1731188364211 2024-11-09T21:39:25,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T21:39:25,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T21:39:25,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T21:39:25,115 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/d86ff69d6719,32969,1731188364211 2024-11-09T21:39:25,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T21:39:25,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:25,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T21:39:25,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:25,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:25,146 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-09T21:39:25,147 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/d86ff69d6719,32969,1731188364211 from backup master directory 2024-11-09T21:39:25,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T21:39:25,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/d86ff69d6719,32969,1731188364211 2024-11-09T21:39:25,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T21:39:25,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T21:39:25,160 WARN [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-09T21:39:25,161 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=d86ff69d6719,32969,1731188364211 2024-11-09T21:39:25,162 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-09T21:39:25,163 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-09T21:39:25,218 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/hbase.id] with ID: ccc64bdc-60a8-4452-b8d5-921153551095 2024-11-09T21:39:25,218 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/.tmp/hbase.id 2024-11-09T21:39:25,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741826_1002 (size=42) 2024-11-09T21:39:25,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741826_1002 (size=42) 2024-11-09T21:39:25,231 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/.tmp/hbase.id]:[hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/hbase.id] 2024-11-09T21:39:25,284 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T21:39:25,289 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-09T21:39:25,307 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 
2024-11-09T21:39:25,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:25,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:25,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:25,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741827_1003 (size=196) 2024-11-09T21:39:25,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741827_1003 (size=196) 2024-11-09T21:39:25,369 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T21:39:25,371 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-09T21:39:25,386 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
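The NoSuchMethodException above is expected: the code probes DFSClient by reflection and falls back to a pre-HDFS-12396 path when the method is absent. Here is a generic sketch of that probe-and-fall-back pattern; the class and method used for the demo are placeholders, not HBase internals.

  import java.lang.reflect.Method;

  final class CapabilityProbe {
    /** Returns the optional method if the running library provides it, else null. */
    static Method findOptionalMethod(Class<?> owner, String name, Class<?>... paramTypes) {
      try {
        return owner.getDeclaredMethod(name, paramTypes);
      } catch (NoSuchMethodException e) {
        // Older library version: callers take the fallback code path instead of failing.
        return null;
      }
    }

    public static void main(String[] args) {
      // Hypothetical probe: does String have isBlank() (Java 11+)?
      Method m = findOptionalMethod(String.class, "isBlank");
      System.out.println(m != null ? "new API available" : "falling back to legacy path");
    }
  }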
2024-11-09T21:39:25,391 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T21:39:25,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741828_1004 (size=1189) 2024-11-09T21:39:25,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741828_1004 (size=1189) 2024-11-09T21:39:25,438 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store 2024-11-09T21:39:25,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741829_1005 (size=34) 2024-11-09T21:39:25,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741829_1005 (size=34) 2024-11-09T21:39:25,464 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-09T21:39:25,467 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:25,469 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T21:39:25,469 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
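The table descriptor dumped above is the kind of definition that, in client code, would be assembled with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder. A hedged sketch for a single 'info'-like family follows; it shows user-level API usage only, not how the master actually builds its internal store region, and the table name is a placeholder.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class DescriptorSketch {
    public static TableDescriptor build() {
      ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(3)                                   // VERSIONS => '3'
          .setInMemory(true)                                   // IN_MEMORY => 'true'
          .setBlocksize(8192)                                  // BLOCKSIZE => 8 KB
          .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
          .setBloomFilterType(BloomType.ROWCOL)
          .build();
      return TableDescriptorBuilder
          .newBuilder(TableName.valueOf("example_table"))      // placeholder name
          .setColumnFamily(info)
          .build();
    }
  }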
2024-11-09T21:39:25,469 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T21:39:25,470 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T21:39:25,471 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T21:39:25,471 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T21:39:25,472 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731188365469Disabling compacts and flushes for region at 1731188365469Disabling writes for close at 1731188365471 (+2 ms)Writing region close event to WAL at 1731188365471Closed at 1731188365471 2024-11-09T21:39:25,474 WARN [master/d86ff69d6719:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/.initializing 2024-11-09T21:39:25,474 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/WALs/d86ff69d6719,32969,1731188364211 2024-11-09T21:39:25,482 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T21:39:25,499 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d86ff69d6719%2C32969%2C1731188364211, suffix=, logDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/WALs/d86ff69d6719,32969,1731188364211, archiveDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/oldWALs, maxLogs=10 2024-11-09T21:39:25,528 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/WALs/d86ff69d6719,32969,1731188364211/d86ff69d6719%2C32969%2C1731188364211.1731188365505, exclude list is [], retry=0 2024-11-09T21:39:25,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41017,DS-664b57a7-4cc1-41f5-8c8e-a968aff5a09d,DISK] 2024-11-09T21:39:25,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45239,DS-d7d2d3da-56dc-4bca-8374-50a5e0a1d980,DISK] 2024-11-09T21:39:25,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-09T21:39:25,582 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/WALs/d86ff69d6719,32969,1731188364211/d86ff69d6719%2C32969%2C1731188364211.1731188365505 2024-11-09T21:39:25,583 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:32863:32863),(127.0.0.1/127.0.0.1:39887:39887)] 2024-11-09T21:39:25,584 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-09T21:39:25,584 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:25,588 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T21:39:25,589 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T21:39:25,622 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T21:39:25,653 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-09T21:39:25,656 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:25,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T21:39:25,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T21:39:25,663 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-09T21:39:25,663 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:25,665 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T21:39:25,665 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T21:39:25,668 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-09T21:39:25,668 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:25,669 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T21:39:25,669 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T21:39:25,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-09T21:39:25,672 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:25,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T21:39:25,674 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T21:39:25,678 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T21:39:25,679 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T21:39:25,685 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T21:39:25,686 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T21:39:25,690 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
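The CompactionConfiguration dumps above (minCompactSize, maxFilesToCompact, ratio, and so on) are derived from ordinary Hadoop Configuration values. The sketch below reads a few of them with defaults matching the logged numbers; treat the key names as assumptions drawn from HBase's documented configuration, not values confirmed by this log.

  import org.apache.hadoop.conf.Configuration;

  public class CompactionConfigSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Assumed key names; defaults chosen to match the values printed above.
      float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);
      int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
      long minCompactSize = conf.getLong("hbase.hstore.compaction.min.size", 134217728L); // 128 MB
      System.out.printf("ratio=%.6f maxFilesToCompact=%d minCompactSize=%d%n",
          ratio, maxFiles, minCompactSize);
    }
  }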
2024-11-09T21:39:25,694 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T21:39:25,699 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T21:39:25,701 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72251275, jitterRate=0.07662789523601532}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T21:39:25,709 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731188365601Initializing all the Stores at 1731188365603 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731188365603Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731188365604 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731188365604Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731188365604Cleaning up temporary data from old regions at 1731188365686 (+82 ms)Region opened successfully at 1731188365709 (+23 ms) 2024-11-09T21:39:25,711 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-09T21:39:25,748 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@343705bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d86ff69d6719/172.17.0.2:0 2024-11-09T21:39:25,775 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
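Two numbers in the lines above can be reproduced directly: flushSizeLowerBound=33554432 is the 128 MB memstore flush size divided by the region's 4 column families (the "32.0 M" fallback mentioned earlier), and desiredMaxFileSize=72251275 is a 64 MB base split size scaled by the logged jitterRate. A small sketch of that arithmetic; the variable names are mine, not HBase's.

  public class SplitAndFlushMathSketch {
    public static void main(String[] args) {
      long memstoreFlushSize = 134_217_728L;     // 128 MB, from "flushSize=134217728"
      int families = 4;                          // info, proc, rs, state
      long flushLowerBound = memstoreFlushSize / families;
      System.out.println(flushLowerBound);       // 33554432, matching flushSizeLowerBound

      long baseSplitSize = 67_108_864L;          // 64 MB (inferred from the logged values)
      double jitterRate = 0.07662789523601532;   // from the log line above
      long desiredMaxFileSize = (long) (baseSplitSize * (1 + jitterRate));
      System.out.println(desiredMaxFileSize);    // ~72251275, the logged desiredMaxFileSize
    }
  }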
2024-11-09T21:39:25,785 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-09T21:39:25,786 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-09T21:39:25,789 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-09T21:39:25,791 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-09T21:39:25,796 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-09T21:39:25,796 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-09T21:39:25,820 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-09T21:39:25,828 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-09T21:39:25,884 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-09T21:39:25,891 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-09T21:39:25,895 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-09T21:39:25,903 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-09T21:39:25,905 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-09T21:39:25,909 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-09T21:39:25,920 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-09T21:39:25,922 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-09T21:39:25,934 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-09T21:39:25,952 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-09T21:39:25,961 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-09T21:39:25,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T21:39:25,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T21:39:25,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T21:39:25,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:25,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:25,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:25,975 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=d86ff69d6719,32969,1731188364211, sessionid=0x10122d3a32c0000, setting cluster-up flag (Was=false) 2024-11-09T21:39:26,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:26,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:26,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:26,028 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-09T21:39:26,032 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d86ff69d6719,32969,1731188364211 2024-11-09T21:39:26,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:26,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:26,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:26,078 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-09T21:39:26,080 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=d86ff69d6719,32969,1731188364211 2024-11-09T21:39:26,088 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-09T21:39:26,116 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.quotas.MasterQuotasObserver loaded, priority=536870911. 2024-11-09T21:39:26,163 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-09T21:39:26,171 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-09T21:39:26,177 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
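The ZKWatcher lines throughout this section are ordinary ZooKeeper watch notifications (NodeCreated, NodeChildrenChanged, SyncConnected). A minimal standalone sketch of registering such a watch with the plain ZooKeeper client API; the connect string, timeout, and znode path are placeholders.

  import java.util.concurrent.CountDownLatch;
  import org.apache.zookeeper.WatchedEvent;
  import org.apache.zookeeper.Watcher;
  import org.apache.zookeeper.ZooKeeper;

  public class ZkWatchSketch {
    public static void main(String[] args) throws Exception {
      CountDownLatch connected = new CountDownLatch(1);
      Watcher watcher = (WatchedEvent event) -> {
        // Mirrors what the ZKWatcher debug lines report: event type, state, and path.
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());
        if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
          connected.countDown();
        }
      };
      ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher); // placeholder quorum
      connected.await();
      // Set a one-shot watch; a later NodeCreated/NodeDeleted event for this
      // path is delivered to the default watcher above.
      zk.exists("/hbase/running", true);
      Thread.sleep(5_000);
      zk.close();
    }
  }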
2024-11-09T21:39:26,182 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: d86ff69d6719,32969,1731188364211 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-09T21:39:26,187 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(746): ClusterId : ccc64bdc-60a8-4452-b8d5-921153551095 2024-11-09T21:39:26,187 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(746): ClusterId : ccc64bdc-60a8-4452-b8d5-921153551095 2024-11-09T21:39:26,189 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/d86ff69d6719:0, corePoolSize=5, maxPoolSize=5 2024-11-09T21:39:26,189 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T21:39:26,189 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T21:39:26,189 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/d86ff69d6719:0, corePoolSize=5, maxPoolSize=5 2024-11-09T21:39:26,189 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/d86ff69d6719:0, corePoolSize=5, maxPoolSize=5 2024-11-09T21:39:26,190 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/d86ff69d6719:0, corePoolSize=5, maxPoolSize=5 2024-11-09T21:39:26,190 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/d86ff69d6719:0, corePoolSize=10, maxPoolSize=10 2024-11-09T21:39:26,190 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,190 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/d86ff69d6719:0, corePoolSize=2, maxPoolSize=2 2024-11-09T21:39:26,190 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,192 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731188396192 2024-11-09T21:39:26,194 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-09T21:39:26,195 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-09T21:39:26,196 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T21:39:26,196 INFO [PEWorker-1 {}] 
procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-09T21:39:26,198 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-09T21:39:26,199 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-09T21:39:26,199 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-09T21:39:26,199 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-09T21:39:26,200 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,203 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:26,204 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-09T21:39:26,203 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-09T21:39:26,205 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-09T21:39:26,205 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-09T21:39:26,207 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-09T21:39:26,207 INFO 
[master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-09T21:39:26,210 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/d86ff69d6719:0:becomeActiveMaster-HFileCleaner.large.0-1731188366208,5,FailOnTimeoutGroup] 2024-11-09T21:39:26,210 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/d86ff69d6719:0:becomeActiveMaster-HFileCleaner.small.0-1731188366210,5,FailOnTimeoutGroup] 2024-11-09T21:39:26,210 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,211 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-09T21:39:26,212 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,212 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741831_1007 (size=1321) 2024-11-09T21:39:26,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741831_1007 (size=1321) 2024-11-09T21:39:26,216 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-09T21:39:26,216 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', 
TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b 2024-11-09T21:39:26,222 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T21:39:26,222 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T21:39:26,222 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T21:39:26,222 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T21:39:26,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741832_1008 (size=32) 2024-11-09T21:39:26,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741832_1008 (size=32) 2024-11-09T21:39:26,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:26,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T21:39:26,233 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T21:39:26,233 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:26,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T21:39:26,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T21:39:26,237 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T21:39:26,237 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T21:39:26,237 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:26,238 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T21:39:26,238 DEBUG [RS:1;d86ff69d6719:45593 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3184742a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d86ff69d6719/172.17.0.2:0 2024-11-09T21:39:26,238 DEBUG [RS:0;d86ff69d6719:45707 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f10f95e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=d86ff69d6719/172.17.0.2:0 2024-11-09T21:39:26,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T21:39:26,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T21:39:26,242 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T21:39:26,242 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:26,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T21:39:26,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T21:39:26,246 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T21:39:26,246 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:26,247 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T21:39:26,247 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T21:39:26,249 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740 2024-11-09T21:39:26,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740 2024-11-09T21:39:26,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T21:39:26,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T21:39:26,254 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-09T21:39:26,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T21:39:26,259 DEBUG [RS:0;d86ff69d6719:45707 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;d86ff69d6719:45707 2024-11-09T21:39:26,259 DEBUG [RS:1;d86ff69d6719:45593 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;d86ff69d6719:45593 2024-11-09T21:39:26,260 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T21:39:26,261 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68106855, jitterRate=0.014871224761009216}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T21:39:26,262 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T21:39:26,262 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T21:39:26,262 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T21:39:26,262 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T21:39:26,262 DEBUG [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T21:39:26,262 DEBUG [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(832): About to register with Master. 
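The "Installed shutdown hook thread" lines above use the standard JVM shutdown-hook mechanism. A tiny generic sketch follows; the hook body and thread name are placeholders, not what HBase's ShutdownHook actually does.

  public class ShutdownHookSketch {
    public static void main(String[] args) throws InterruptedException {
      Thread hook = new Thread(() -> {
        // Runs when the JVM begins an orderly shutdown (SIGTERM, System.exit, ...).
        System.out.println("shutdown hook: flushing and closing resources (placeholder)");
      }, "Shutdownhook:example");
      Runtime.getRuntime().addShutdownHook(hook);
      System.out.println("hook installed: " + hook.getName());
      Thread.sleep(100); // do some work, then exit normally; the hook fires on exit
    }
  }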
2024-11-09T21:39:26,265 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(2659): reportForDuty to master=d86ff69d6719,32969,1731188364211 with port=45707, startcode=1731188364951 2024-11-09T21:39:26,265 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(2659): reportForDuty to master=d86ff69d6719,32969,1731188364211 with port=45593, startcode=1731188365056 2024-11-09T21:39:26,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731188366228Initializing all the Stores at 1731188366230 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731188366230Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731188366230Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731188366230Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731188366230Cleaning up temporary data from old regions at 1731188366253 (+23 ms)Region opened successfully at 1731188366265 (+12 ms) 2024-11-09T21:39:26,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T21:39:26,265 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T21:39:26,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T21:39:26,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T21:39:26,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T21:39:26,267 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T21:39:26,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731188366265Disabling compacts and flushes for region at 1731188366265Disabling writes for close at 1731188366266 (+1 ms)Writing region close event to WAL at 1731188366267 (+1 ms)Closed at 1731188366267 2024-11-09T21:39:26,271 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T21:39:26,271 INFO [PEWorker-1 {}] 
procedure.InitMetaProcedure(108): Going to assign meta 2024-11-09T21:39:26,277 DEBUG [RS:0;d86ff69d6719:45707 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T21:39:26,277 DEBUG [RS:1;d86ff69d6719:45593 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T21:39:26,277 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-09T21:39:26,286 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T21:39:26,303 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-09T21:39:26,311 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46385, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T21:39:26,311 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57485, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T21:39:26,317 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32969 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d86ff69d6719,45593,1731188365056 2024-11-09T21:39:26,319 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32969 {}] master.ServerManager(517): Registering regionserver=d86ff69d6719,45593,1731188365056 2024-11-09T21:39:26,327 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32969 {}] master.ServerManager(363): Checking decommissioned status of RegionServer d86ff69d6719,45707,1731188364951 2024-11-09T21:39:26,328 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32969 {}] master.ServerManager(517): Registering regionserver=d86ff69d6719,45707,1731188364951 2024-11-09T21:39:26,331 DEBUG [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b 2024-11-09T21:39:26,331 DEBUG [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36387 2024-11-09T21:39:26,331 DEBUG [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T21:39:26,332 DEBUG [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b 2024-11-09T21:39:26,332 DEBUG [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36387 2024-11-09T21:39:26,332 DEBUG [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T21:39:26,379 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T21:39:26,381 DEBUG [RS:0;d86ff69d6719:45707 {}] zookeeper.ZKUtil(111): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d86ff69d6719,45707,1731188364951 2024-11-09T21:39:26,381 DEBUG [RS:1;d86ff69d6719:45593 {}] zookeeper.ZKUtil(111): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/d86ff69d6719,45593,1731188365056 2024-11-09T21:39:26,381 WARN [RS:0;d86ff69d6719:45707 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T21:39:26,381 WARN [RS:1;d86ff69d6719:45593 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T21:39:26,381 INFO [RS:0;d86ff69d6719:45707 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T21:39:26,381 INFO [RS:1;d86ff69d6719:45593 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T21:39:26,382 DEBUG [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/WALs/d86ff69d6719,45707,1731188364951 2024-11-09T21:39:26,382 DEBUG [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/WALs/d86ff69d6719,45593,1731188365056 2024-11-09T21:39:26,385 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d86ff69d6719,45707,1731188364951] 2024-11-09T21:39:26,385 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [d86ff69d6719,45593,1731188365056] 2024-11-09T21:39:26,408 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T21:39:26,408 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T21:39:26,422 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T21:39:26,422 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T21:39:26,427 INFO [RS:1;d86ff69d6719:45593 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T21:39:26,427 INFO [RS:0;d86ff69d6719:45707 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T21:39:26,427 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
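Note: the entries above record each region server registering by creating an ephemeral znode under /hbase/rs (quorum 127.0.0.1:59566) and the master's RegionServerTracker reacting to the resulting NodeChildrenChanged event. The sketch below only illustrates that watch-and-relist pattern with the stock Apache ZooKeeper client; it is not HBase's internal ZKWatcher/ZKUtil code, and the class name and timings are invented for the example.

import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustration only: watch the children of /hbase/rs the way a tracker would,
// re-arming the one-shot watch after every NodeChildrenChanged event.
public class RsZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // The quorum address 127.0.0.1:59566 is copied from the log lines above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59566", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    Watcher childWatcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
          try {
            // getChildren with a watcher re-registers the watch and returns the current list.
            List<String> servers = zk.getChildren("/hbase/rs", this);
            System.out.println("region servers now: " + servers);
          } catch (Exception e) {
            e.printStackTrace();
          }
        }
      }
    };
    System.out.println("region servers: " + zk.getChildren("/hbase/rs", childWatcher));
    Thread.sleep(60000); // keep the JVM alive long enough to observe membership changes
    zk.close();
  }
}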
2024-11-09T21:39:26,427 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,428 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T21:39:26,430 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T21:39:26,433 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T21:39:26,433 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T21:39:26,435 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,435 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,435 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,435 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,435 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,435 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d86ff69d6719:0, corePoolSize=2, maxPoolSize=2 2024-11-09T21:39:26,436 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/d86ff69d6719:0, corePoolSize=2, maxPoolSize=2 2024-11-09T21:39:26,436 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service 
name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,436 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,437 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,437 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,437 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,437 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/d86ff69d6719:0, corePoolSize=1, maxPoolSize=1 2024-11-09T21:39:26,437 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d86ff69d6719:0, corePoolSize=3, maxPoolSize=3 2024-11-09T21:39:26,437 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/d86ff69d6719:0, corePoolSize=3, maxPoolSize=3 2024-11-09T21:39:26,437 DEBUG [RS:1;d86ff69d6719:45593 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d86ff69d6719:0, corePoolSize=3, maxPoolSize=3 2024-11-09T21:39:26,437 DEBUG [RS:0;d86ff69d6719:45707 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/d86ff69d6719:0, corePoolSize=3, maxPoolSize=3 2024-11-09T21:39:26,438 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,438 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-11-09T21:39:26,438 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,438 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,438 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,438 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,438 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,438 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=FileSystemUtilizationChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,439 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,439 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,439 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=FileSystemUtilizationChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,439 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=d86ff69d6719,45707,1731188364951-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T21:39:26,439 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,439 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=d86ff69d6719,45593,1731188365056-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T21:39:26,454 WARN [d86ff69d6719:32969 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-09T21:39:26,461 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T21:39:26,461 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T21:39:26,463 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=d86ff69d6719,45707,1731188364951-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,463 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=d86ff69d6719,45593,1731188365056-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,464 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,464 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T21:39:26,464 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.Replication(171): d86ff69d6719,45707,1731188364951 started 2024-11-09T21:39:26,464 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.Replication(171): d86ff69d6719,45593,1731188365056 started 2024-11-09T21:39:26,481 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,482 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(1482): Serving as d86ff69d6719,45707,1731188364951, RpcServer on d86ff69d6719/172.17.0.2:45707, sessionid=0x10122d3a32c0001 2024-11-09T21:39:26,482 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T21:39:26,483 DEBUG [RS:0;d86ff69d6719:45707 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d86ff69d6719,45707,1731188364951 2024-11-09T21:39:26,483 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd86ff69d6719,45707,1731188364951' 2024-11-09T21:39:26,483 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T21:39:26,484 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T21:39:26,485 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T21:39:26,485 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T21:39:26,485 DEBUG [RS:0;d86ff69d6719:45707 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d86ff69d6719,45707,1731188364951 2024-11-09T21:39:26,485 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd86ff69d6719,45707,1731188364951' 2024-11-09T21:39:26,485 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T21:39:26,486 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T21:39:26,487 DEBUG [RS:0;d86ff69d6719:45707 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T21:39:26,487 INFO [RS:0;d86ff69d6719:45707 {}] quotas.RegionServerRpcQuotaManager(68): Initializing RPC quota support 2024-11-09T21:39:26,487 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T21:39:26,487 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(1482): Serving as d86ff69d6719,45593,1731188365056, RpcServer on d86ff69d6719/172.17.0.2:45593, sessionid=0x10122d3a32c0002 2024-11-09T21:39:26,488 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T21:39:26,488 DEBUG [RS:1;d86ff69d6719:45593 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager d86ff69d6719,45593,1731188365056 2024-11-09T21:39:26,488 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd86ff69d6719,45593,1731188365056' 2024-11-09T21:39:26,488 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T21:39:26,489 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T21:39:26,489 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaRefresherChore, period=1800000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,490 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T21:39:26,490 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T21:39:26,490 DEBUG [RS:1;d86ff69d6719:45593 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager d86ff69d6719,45593,1731188365056 2024-11-09T21:39:26,490 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'd86ff69d6719,45593,1731188365056' 2024-11-09T21:39:26,490 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T21:39:26,490 DEBUG [RS:0;d86ff69d6719:45707 {}] zookeeper.ZKUtil(347): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Unable to get data of znode /hbase/rpc-throttle because node does not exist (not an error) 2024-11-09T21:39:26,490 INFO [RS:0;d86ff69d6719:45707 {}] quotas.RegionServerRpcQuotaManager(74): Start rpc quota manager and rpc throttle enabled is true 2024-11-09T21:39:26,491 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T21:39:26,491 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=SpaceQuotaRefresherChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,491 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(168): Chore ScheduledChore name=RegionSizeReportingChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,491 DEBUG [RS:1;d86ff69d6719:45593 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T21:39:26,491 INFO [RS:1;d86ff69d6719:45593 {}] quotas.RegionServerRpcQuotaManager(68): Initializing RPC quota support 2024-11-09T21:39:26,491 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaRefresherChore, period=1800000, unit=MILLISECONDS is enabled. 
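Note: the ChoreService entries above register periodic maintenance tasks with a period and unit (CompactionChecker every 1000 ms, ExecutorStatusChore every 60000 ms, QuotaRefresherChore every 1800000 ms, and so on). HBase runs these through its own ScheduledChore/ChoreService machinery; purely as an analogy, not the actual implementation, the same fixed-rate pattern in plain Java looks like the sketch below, with the class name and task body invented for illustration.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Analogy only: a fixed-rate periodic task, similar in spirit to a chore
// registered with period=1000, unit=MILLISECONDS.
public class ChoreAnalogy {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
    Runnable compactionCheck = () ->
        System.out.println("placeholder: check whether any store needs compaction");
    pool.scheduleAtFixedRate(compactionCheck, 0, 1000, TimeUnit.MILLISECONDS);
    Thread.sleep(5000);   // let the task fire a few times
    pool.shutdownNow();
  }
}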
2024-11-09T21:39:26,492 DEBUG [RS:1;d86ff69d6719:45593 {}] zookeeper.ZKUtil(347): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Unable to get data of znode /hbase/rpc-throttle because node does not exist (not an error) 2024-11-09T21:39:26,492 INFO [RS:1;d86ff69d6719:45593 {}] quotas.RegionServerRpcQuotaManager(74): Start rpc quota manager and rpc throttle enabled is true 2024-11-09T21:39:26,492 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=SpaceQuotaRefresherChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,492 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(168): Chore ScheduledChore name=RegionSizeReportingChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:26,599 INFO [RS:0;d86ff69d6719:45707 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T21:39:26,599 INFO [RS:1;d86ff69d6719:45593 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T21:39:26,602 INFO [RS:1;d86ff69d6719:45593 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d86ff69d6719%2C45593%2C1731188365056, suffix=, logDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/WALs/d86ff69d6719,45593,1731188365056, archiveDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/oldWALs, maxLogs=32 2024-11-09T21:39:26,602 INFO [RS:0;d86ff69d6719:45707 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d86ff69d6719%2C45707%2C1731188364951, suffix=, logDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/WALs/d86ff69d6719,45707,1731188364951, archiveDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/oldWALs, maxLogs=32 2024-11-09T21:39:26,620 DEBUG [RS:0;d86ff69d6719:45707 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/WALs/d86ff69d6719,45707,1731188364951/d86ff69d6719%2C45707%2C1731188364951.1731188366606, exclude list is [], retry=0 2024-11-09T21:39:26,621 DEBUG [RS:1;d86ff69d6719:45593 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/WALs/d86ff69d6719,45593,1731188365056/d86ff69d6719%2C45593%2C1731188365056.1731188366606, exclude list is [], retry=0 2024-11-09T21:39:26,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41017,DS-664b57a7-4cc1-41f5-8c8e-a968aff5a09d,DISK] 2024-11-09T21:39:26,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45239,DS-d7d2d3da-56dc-4bca-8374-50a5e0a1d980,DISK] 2024-11-09T21:39:26,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45239,DS-d7d2d3da-56dc-4bca-8374-50a5e0a1d980,DISK] 2024-11-09T21:39:26,626 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41017,DS-664b57a7-4cc1-41f5-8c8e-a968aff5a09d,DISK] 2024-11-09T21:39:26,631 INFO [RS:1;d86ff69d6719:45593 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/WALs/d86ff69d6719,45593,1731188365056/d86ff69d6719%2C45593%2C1731188365056.1731188366606 2024-11-09T21:39:26,631 INFO [RS:0;d86ff69d6719:45707 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/WALs/d86ff69d6719,45707,1731188364951/d86ff69d6719%2C45707%2C1731188364951.1731188366606 2024-11-09T21:39:26,631 DEBUG [RS:1;d86ff69d6719:45593 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39887:39887),(127.0.0.1/127.0.0.1:32863:32863)] 2024-11-09T21:39:26,631 DEBUG [RS:0;d86ff69d6719:45707 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39887:39887),(127.0.0.1/127.0.0.1:32863:32863)] 2024-11-09T21:39:26,709 DEBUG [d86ff69d6719:32969 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=2, allServersCount=2 2024-11-09T21:39:26,719 DEBUG [d86ff69d6719:32969 {}] balancer.BalancerClusterState(204): Hosts are {d86ff69d6719=0} racks are {/default-rack=0} 2024-11-09T21:39:26,726 DEBUG [d86ff69d6719:32969 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T21:39:26,726 DEBUG [d86ff69d6719:32969 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T21:39:26,726 DEBUG [d86ff69d6719:32969 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T21:39:26,726 DEBUG [d86ff69d6719:32969 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T21:39:26,727 INFO [d86ff69d6719:32969 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T21:39:26,727 INFO [d86ff69d6719:32969 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T21:39:26,727 DEBUG [d86ff69d6719:32969 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T21:39:26,734 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=d86ff69d6719,45707,1731188364951 2024-11-09T21:39:26,739 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d86ff69d6719,45707,1731188364951, state=OPENING 2024-11-09T21:39:26,776 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-09T21:39:26,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:26,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:26,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
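Note: the WAL configuration logged above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) comes from the cluster configuration rather than hard-coded values. To the best of my recollection the relevant properties are hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs, but treat those names as assumptions to verify against your HBase version; a hedged sketch of setting them programmatically follows.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch: property names recalled from the HBase reference guide, not taken from this log.
public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at 50% of block size, i.e. 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // matches maxLogs=32 in the log
    System.out.println("maxlogs = " + conf.get("hbase.regionserver.maxlogs"));
  }
}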
2024-11-09T21:39:26,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T21:39:26,790 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T21:39:26,790 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T21:39:26,793 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T21:39:26,795 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=d86ff69d6719,45707,1731188364951}] 2024-11-09T21:39:26,973 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T21:39:26,975 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48343, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T21:39:26,986 INFO [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-09T21:39:26,987 INFO [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T21:39:26,987 INFO [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-09T21:39:26,990 INFO [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=d86ff69d6719%2C45707%2C1731188364951.meta, suffix=.meta, logDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/WALs/d86ff69d6719,45707,1731188364951, archiveDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/oldWALs, maxLogs=32 2024-11-09T21:39:27,005 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/WALs/d86ff69d6719,45707,1731188364951/d86ff69d6719%2C45707%2C1731188364951.meta.1731188366992.meta, exclude list is [], retry=0 2024-11-09T21:39:27,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45239,DS-d7d2d3da-56dc-4bca-8374-50a5e0a1d980,DISK] 2024-11-09T21:39:27,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41017,DS-664b57a7-4cc1-41f5-8c8e-a968aff5a09d,DISK] 2024-11-09T21:39:27,012 INFO 
[RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/WALs/d86ff69d6719,45707,1731188364951/d86ff69d6719%2C45707%2C1731188364951.meta.1731188366992.meta 2024-11-09T21:39:27,012 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:32863:32863),(127.0.0.1/127.0.0.1:39887:39887)] 2024-11-09T21:39:27,012 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-09T21:39:27,014 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-09T21:39:27,017 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-09T21:39:27,020 INFO [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-09T21:39:27,026 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-09T21:39:27,027 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:27,027 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-09T21:39:27,027 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-09T21:39:27,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T21:39:27,032 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T21:39:27,033 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:27,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T21:39:27,034 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T21:39:27,035 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T21:39:27,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:27,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T21:39:27,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T21:39:27,038 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T21:39:27,038 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:27,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-09T21:39:27,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T21:39:27,041 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T21:39:27,041 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:27,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T21:39:27,043 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T21:39:27,044 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740 2024-11-09T21:39:27,047 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740 2024-11-09T21:39:27,050 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T21:39:27,050 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T21:39:27,051 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-09T21:39:27,054 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T21:39:27,056 INFO [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70900374, jitterRate=0.05649790167808533}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T21:39:27,056 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-09T21:39:27,057 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731188367028Writing region info on filesystem at 1731188367028Initializing all the Stores at 1731188367030 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731188367030Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731188367031 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731188367031Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731188367031Cleaning up temporary data from old regions at 1731188367050 (+19 ms)Running coprocessor post-open hooks at 1731188367056 (+6 ms)Region opened successfully at 1731188367057 (+1 ms) 2024-11-09T21:39:27,064 INFO [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731188366966 2024-11-09T21:39:27,075 DEBUG [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-09T21:39:27,075 INFO [RS_OPEN_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-09T21:39:27,077 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=d86ff69d6719,45707,1731188364951 2024-11-09T21:39:27,079 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as d86ff69d6719,45707,1731188364951, state=OPEN 2024-11-09T21:39:27,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T21:39:27,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T21:39:27,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T21:39:27,103 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T21:39:27,103 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T21:39:27,103 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T21:39:27,104 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=d86ff69d6719,45707,1731188364951 2024-11-09T21:39:27,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-09T21:39:27,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=d86ff69d6719,45707,1731188364951 in 309 msec 2024-11-09T21:39:27,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-09T21:39:27,117 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 834 msec 2024-11-09T21:39:27,118 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T21:39:27,119 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-09T21:39:27,137 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T21:39:27,138 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d86ff69d6719,45707,1731188364951, seqNum=-1] 2024-11-09T21:39:27,157 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T21:39:27,159 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38949, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
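Note: at this point hbase:meta is OPEN on d86ff69d6719,45707,1731188364951 and its location has been published to the /hbase/meta-region-server znode, which is what client connections fetch first (the ConnectionUtils "Start fetching meta region location from registry" entries nearby show the same lookup). The sketch below performs the equivalent lookup through the public client API; the ZooKeeper client port is copied from the log's quorum=127.0.0.1:59566, while the class name and the rest of the configuration are a generic example rather than what this test actually executes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

// Generic example: ask a running cluster where hbase:meta is currently served.
public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 59566); // port taken from the log's quorum address
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta is on " + loc.getServerName());
    }
  }
}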
2024-11-09T21:39:27,184 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0610 sec 2024-11-09T21:39:27,184 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731188367184, completionTime=-1 2024-11-09T21:39:27,187 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=2; waited=0ms, expected min=2 server(s), max=2 server(s), master is running 2024-11-09T21:39:27,187 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-09T21:39:27,212 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=2 2024-11-09T21:39:27,212 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731188427212 2024-11-09T21:39:27,212 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731188487212 2024-11-09T21:39:27,212 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 25 msec 2024-11-09T21:39:27,214 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-09T21:39:27,222 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d86ff69d6719,32969,1731188364211-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:27,223 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d86ff69d6719,32969,1731188364211-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:27,223 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d86ff69d6719,32969,1731188364211-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:27,225 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-d86ff69d6719:32969, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:27,225 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:27,226 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:27,233 DEBUG [master/d86ff69d6719:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-09T21:39:27,262 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.101sec 2024-11-09T21:39:27,264 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] quotas.MasterQuotaManager(103): Quota table not found. Creating... 
2024-11-09T21:39:27,266 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.HMaster(2490): Client=null/null create 'hbase:quota', {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T21:39:27,275 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:quota 2024-11-09T21:39:27,276 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] quotas.MasterQuotaManager(107): Initializing quota support 2024-11-09T21:39:27,278 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] namespace.NamespaceStateManager(59): Namespace State Manager started. 2024-11-09T21:39:27,279 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T21:39:27,281 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:27,285 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T21:39:27,297 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] namespace.NamespaceStateManager(222): Finished updating state of 2 namespaces. 2024-11-09T21:39:27,297 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] namespace.NamespaceAuditor(50): NamespaceAuditor started. 2024-11-09T21:39:27,298 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaObserverChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:27,299 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaObserverChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T21:39:27,301 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-09T21:39:27,302 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-09T21:39:27,303 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
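Note: the master is creating the hbase:quota system table above through CreateTableProcedure, an internal path, but the descriptor it logs (families 'q' and 'u', VERSIONS => '1') maps directly onto the ordinary Admin API. The sketch below creates a user table with the same two-family layout; the table name and connection setup are illustrative assumptions, not taken from the test.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: a user table with the same column-family layout ('q' and 'u', one version each)
// as the hbase:quota descriptor logged above. System tables themselves are created by the master.
public class QuotaLikeTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("example_quota_like"))   // hypothetical table name
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("q"))
              .setMaxVersions(1).build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("u"))
              .setMaxVersions(1).build())
          .build();
      admin.createTable(td);
    }
  }
}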
2024-11-09T21:39:27,303 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-09T21:39:27,303 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74e2323, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T21:39:27,304 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d86ff69d6719,32969,1731188364211-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T21:39:27,305 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d86ff69d6719,32969,1731188364211-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-09T21:39:27,307 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-09T21:39:27,307 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-09T21:39:27,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741836_1012 (size=624) 2024-11-09T21:39:27,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741836_1012 (size=624) 2024-11-09T21:39:27,348 DEBUG [master/d86ff69d6719:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-09T21:39:27,349 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request d86ff69d6719,32969,-1 for getting cluster id 2024-11-09T21:39:27,349 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => db7fd7a0f2c1cff1a9776ed1fa466bae, NAME => 'hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:quota', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b 2024-11-09T21:39:27,350 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-09T21:39:27,350 INFO [master/d86ff69d6719:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=d86ff69d6719,32969,1731188364211-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T21:39:27,353 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-09T21:39:27,380 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ccc64bdc-60a8-4452-b8d5-921153551095' 2024-11-09T21:39:27,384 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-09T21:39:27,384 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ccc64bdc-60a8-4452-b8d5-921153551095" 2024-11-09T21:39:27,385 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@419e5306, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T21:39:27,385 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [d86ff69d6719,32969,-1] 2024-11-09T21:39:27,387 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-09T21:39:27,392 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T21:39:27,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741837_1013 (size=38) 2024-11-09T21:39:27,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741837_1013 (size=38) 2024-11-09T21:39:27,394 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:27,395 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1722): Closing db7fd7a0f2c1cff1a9776ed1fa466bae, disabling compactions & flushes 2024-11-09T21:39:27,395 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 2024-11-09T21:39:27,395 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 2024-11-09T21:39:27,395 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. after waiting 0 ms 2024-11-09T21:39:27,395 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 2024-11-09T21:39:27,395 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1973): Closed hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 
2024-11-09T21:39:27,395 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1676): Region close journal for db7fd7a0f2c1cff1a9776ed1fa466bae: Waiting for close lock at 1731188367395Disabling compacts and flushes for region at 1731188367395Disabling writes for close at 1731188367395Writing region close event to WAL at 1731188367395Closed at 1731188367395 2024-11-09T21:39:27,395 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40302, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-09T21:39:27,398 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T21:39:27,399 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ede0eb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T21:39:27,399 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T21:39:27,407 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae.","families":{"info":[{"qualifier":"regioninfo","vlen":37,"tag":[],"timestamp":"1731188367399"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731188367399"}]},"ts":"1731188367399"} 2024-11-09T21:39:27,410 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d86ff69d6719,45707,1731188364951, seqNum=-1] 2024-11-09T21:39:27,411 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T21:39:27,412 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
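At CREATE_TABLE_ADD_TO_META the procedure writes the region's info:regioninfo and info:state cells into hbase:meta, while the client side resolves the meta region location through the connection registry before scanning it. For reference, region-to-server assignments can be read back through the public RegionLocator API; a small sketch, using hbase:quota only because it matches this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:quota"))) {
      // Each entry pairs a region (as recorded in hbase:meta) with its hosting server.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}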
2024-11-09T21:39:27,414 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38160, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T21:39:27,416 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T21:39:27,419 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:quota","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188367416"}]},"ts":"1731188367416"} 2024-11-09T21:39:27,426 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:quota, state=ENABLING in hbase:meta 2024-11-09T21:39:27,427 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {d86ff69d6719=0} racks are {/default-rack=0} 2024-11-09T21:39:27,429 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T21:39:27,429 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T21:39:27,429 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T21:39:27,429 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T21:39:27,429 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T21:39:27,429 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T21:39:27,429 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T21:39:27,431 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=db7fd7a0f2c1cff1a9776ed1fa466bae, ASSIGN}] 2024-11-09T21:39:27,434 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=db7fd7a0f2c1cff1a9776ed1fa466bae, ASSIGN 2024-11-09T21:39:27,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=d86ff69d6719,32969,1731188364211 2024-11-09T21:39:27,436 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:quota, region=db7fd7a0f2c1cff1a9776ed1fa466bae, ASSIGN; state=OFFLINE, location=d86ff69d6719,45593,1731188365056; forceNewPlan=false, retain=false 2024-11-09T21:39:27,437 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:27,591 INFO [d86ff69d6719:32969 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
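The "Minicluster is up" entry marks the end of HBaseTestingUtil startup for this test, and the Waiter lines are its standard polling helpers. The sketch below shows the typical test scaffolding; it assumes HBaseTestingUtil (the newer replacement for HBaseTestingUtility) keeps the startMiniCluster/createTable/waitTableAvailable/shutdownMiniCluster helpers, so treat the exact method names as an assumption rather than a checked contract:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();                 // embedded HDFS + ZK + master + region server(s)
    try {
      TableName tn = TableName.valueOf("ExampleTable");   // hypothetical table name
      util.createTable(tn, Bytes.toBytes("cf")).close();  // returns a Table handle, unused here
      util.waitTableAvailable(tn);                        // same style of wait the test performs
    } finally {
      util.shutdownMiniCluster();
    }
  }
}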
2024-11-09T21:39:27,592 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=db7fd7a0f2c1cff1a9776ed1fa466bae, regionState=OPENING, regionLocation=d86ff69d6719,45593,1731188365056 2024-11-09T21:39:27,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:quota, region=db7fd7a0f2c1cff1a9776ed1fa466bae, ASSIGN because future has completed 2024-11-09T21:39:27,598 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure db7fd7a0f2c1cff1a9776ed1fa466bae, server=d86ff69d6719,45593,1731188365056}] 2024-11-09T21:39:27,752 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T21:39:27,755 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47627, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T21:39:27,762 INFO [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 2024-11-09T21:39:27,763 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => db7fd7a0f2c1cff1a9776ed1fa466bae, NAME => 'hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae.', STARTKEY => '', ENDKEY => ''} 2024-11-09T21:39:27,763 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table quota db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:27,764 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:27,764 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:27,764 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:27,767 INFO [StoreOpener-db7fd7a0f2c1cff1a9776ed1fa466bae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family q of region db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:27,769 INFO [StoreOpener-db7fd7a0f2c1cff1a9776ed1fa466bae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region db7fd7a0f2c1cff1a9776ed1fa466bae columnFamilyName q 2024-11-09T21:39:27,770 DEBUG [StoreOpener-db7fd7a0f2c1cff1a9776ed1fa466bae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:27,771 INFO [StoreOpener-db7fd7a0f2c1cff1a9776ed1fa466bae-1 {}] regionserver.HStore(327): Store=db7fd7a0f2c1cff1a9776ed1fa466bae/q, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T21:39:27,771 INFO [StoreOpener-db7fd7a0f2c1cff1a9776ed1fa466bae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family u of region db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:27,774 INFO [StoreOpener-db7fd7a0f2c1cff1a9776ed1fa466bae-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region db7fd7a0f2c1cff1a9776ed1fa466bae columnFamilyName u 2024-11-09T21:39:27,775 DEBUG [StoreOpener-db7fd7a0f2c1cff1a9776ed1fa466bae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:27,776 INFO [StoreOpener-db7fd7a0f2c1cff1a9776ed1fa466bae-1 {}] regionserver.HStore(327): Store=db7fd7a0f2c1cff1a9776ed1fa466bae/u, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T21:39:27,776 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:27,778 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:27,779 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:27,781 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:27,781 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:27,782 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:quota descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-11-09T21:39:27,786 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:27,790 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T21:39:27,791 INFO [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened db7fd7a0f2c1cff1a9776ed1fa466bae; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73395723, jitterRate=0.09368149936199188}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-09T21:39:27,791 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:27,792 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for db7fd7a0f2c1cff1a9776ed1fa466bae: Running coprocessor pre-open hook at 1731188367764Writing region info on filesystem at 1731188367764Initializing all the Stores at 1731188367766 (+2 ms)Instantiating store for column family {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731188367766Instantiating store for column family {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731188367766Cleaning up temporary data from old regions at 1731188367781 (+15 ms)Running coprocessor post-open hooks at 1731188367791 (+10 ms)Region opened successfully at 1731188367792 (+1 ms) 2024-11-09T21:39:27,795 INFO [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae., pid=6, 
masterSystemTime=1731188367752 2024-11-09T21:39:27,800 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 2024-11-09T21:39:27,800 INFO [RS_OPEN_PRIORITY_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 2024-11-09T21:39:27,801 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=db7fd7a0f2c1cff1a9776ed1fa466bae, regionState=OPEN, openSeqNum=2, regionLocation=d86ff69d6719,45593,1731188365056 2024-11-09T21:39:27,806 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure db7fd7a0f2c1cff1a9776ed1fa466bae, server=d86ff69d6719,45593,1731188365056 because future has completed 2024-11-09T21:39:27,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-09T21:39:27,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure db7fd7a0f2c1cff1a9776ed1fa466bae, server=d86ff69d6719,45593,1731188365056 in 210 msec 2024-11-09T21:39:27,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-09T21:39:27,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=db7fd7a0f2c1cff1a9776ed1fa466bae, ASSIGN in 383 msec 2024-11-09T21:39:27,820 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T21:39:27,821 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:quota","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188367820"}]},"ts":"1731188367820"} 2024-11-09T21:39:27,825 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:quota, state=ENABLED in hbase:meta 2024-11-09T21:39:27,827 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T21:39:27,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:quota in 560 msec 2024-11-09T21:39:27,862 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=hbase:quota,, stopping at row=hbase:quota ,, for max=2147483647 with caching=100 2024-11-09T21:39:27,872 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T21:39:27,877 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.AsyncConnectionImpl(321): The fetched master address is d86ff69d6719,32969,1731188364211 2024-11-09T21:39:27,879 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@11c491c7 
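The ClusterIdFetcher and ConnectionRegistry traffic above is what every client connection does before building RPC stubs: fetch the cluster id, the active master address, and the hbase:meta location from the registry. A minimal sketch with the public async client (standard HBase client API, nothing test-specific):

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RegistrySketch {
  public static void main(String[] args) throws Exception {
    // Creating the async connection triggers the same registry lookups logged above.
    try (AsyncConnection conn =
             ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      AsyncAdmin admin = conn.getAdmin();
      ClusterMetrics metrics = admin.getClusterMetrics().get();
      System.out.println("cluster id:    " + metrics.getClusterId());
      System.out.println("active master: " + metrics.getMasterName());
    }
  }
}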
2024-11-09T21:39:27,883 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T21:39:27,886 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40316, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-09T21:39:27,890 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=hbase:quota,, stopping at row=hbase:quota ,, for max=2147483647 with caching=100 2024-11-09T21:39:27,897 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T21:39:27,899 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45632, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T21:39:27,913 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin0', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T21:39:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin0 2024-11-09T21:39:27,922 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T21:39:27,924 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:27,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin0" procId is: 7 2024-11-09T21:39:27,927 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T21:39:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T21:39:27,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741838_1014 (size=391) 2024-11-09T21:39:27,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741838_1014 (size=391) 2024-11-09T21:39:27,976 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 88d3dc3c67fc12641fcb33d88d7dd202, NAME => 'TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin0', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b 2024-11-09T21:39:28,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741839_1015 (size=50) 2024-11-09T21:39:28,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741839_1015 (size=50) 2024-11-09T21:39:28,005 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:28,005 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1722): Closing 88d3dc3c67fc12641fcb33d88d7dd202, disabling compactions & flushes 2024-11-09T21:39:28,005 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. 2024-11-09T21:39:28,005 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. 2024-11-09T21:39:28,006 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. after waiting 0 ms 2024-11-09T21:39:28,006 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. 2024-11-09T21:39:28,006 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. 2024-11-09T21:39:28,006 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1676): Region close journal for 88d3dc3c67fc12641fcb33d88d7dd202: Waiting for close lock at 1731188368005Disabling compacts and flushes for region at 1731188368005Disabling writes for close at 1731188368006 (+1 ms)Writing region close event to WAL at 1731188368006Closed at 1731188368006 2024-11-09T21:39:28,009 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T21:39:28,009 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1731188368009"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731188368009"}]},"ts":"1731188368009"} 2024-11-09T21:39:28,015 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
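The TestQuotaAdmin0 creation above follows the same CreateTableProcedure state machine as hbase:quota (pid=7 this time), with the client repeatedly asking "Checking to see if procedure is done pid=7". Through the async admin that polling is hidden behind a future; a sketch mirroring the descriptor in the log (one 'cf' family, REGION_REPLICATION => '1'), with a hypothetical table name:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class AsyncCreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn =
             ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      AsyncAdmin admin = conn.getAdmin();
      TableName tn = TableName.valueOf("ExampleQuotaTable");   // hypothetical name
      // The future completes once the master-side CreateTableProcedure has finished,
      // i.e. the region is assigned and the table is marked ENABLED in hbase:meta.
      admin.createTable(TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .setRegionReplication(1)
          .build()).get();
      System.out.println("available: " + admin.isTableAvailable(tn).get());
    }
  }
}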
2024-11-09T21:39:28,017 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T21:39:28,018 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188368017"}]},"ts":"1731188368017"} 2024-11-09T21:39:28,022 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=ENABLING in hbase:meta 2024-11-09T21:39:28,022 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {d86ff69d6719=0} racks are {/default-rack=0} 2024-11-09T21:39:28,024 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T21:39:28,024 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T21:39:28,024 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T21:39:28,024 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T21:39:28,024 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T21:39:28,024 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T21:39:28,024 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T21:39:28,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=88d3dc3c67fc12641fcb33d88d7dd202, ASSIGN}] 2024-11-09T21:39:28,028 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=88d3dc3c67fc12641fcb33d88d7dd202, ASSIGN 2024-11-09T21:39:28,030 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=88d3dc3c67fc12641fcb33d88d7dd202, ASSIGN; state=OFFLINE, location=d86ff69d6719,45707,1731188364951; forceNewPlan=false, retain=false 2024-11-09T21:39:28,181 INFO [d86ff69d6719:32969 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-09T21:39:28,182 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=88d3dc3c67fc12641fcb33d88d7dd202, regionState=OPENING, regionLocation=d86ff69d6719,45707,1731188364951 2024-11-09T21:39:28,188 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=88d3dc3c67fc12641fcb33d88d7dd202, ASSIGN because future has completed 2024-11-09T21:39:28,190 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 88d3dc3c67fc12641fcb33d88d7dd202, server=d86ff69d6719,45707,1731188364951}] 2024-11-09T21:39:28,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T21:39:28,351 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. 2024-11-09T21:39:28,352 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 88d3dc3c67fc12641fcb33d88d7dd202, NAME => 'TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202.', STARTKEY => '', ENDKEY => ''} 2024-11-09T21:39:28,352 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin0 88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:28,353 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:28,353 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:28,353 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:28,356 INFO [StoreOpener-88d3dc3c67fc12641fcb33d88d7dd202-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:28,358 INFO [StoreOpener-88d3dc3c67fc12641fcb33d88d7dd202-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 88d3dc3c67fc12641fcb33d88d7dd202 columnFamilyName cf 2024-11-09T21:39:28,358 DEBUG [StoreOpener-88d3dc3c67fc12641fcb33d88d7dd202-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:28,359 INFO [StoreOpener-88d3dc3c67fc12641fcb33d88d7dd202-1 {}] regionserver.HStore(327): Store=88d3dc3c67fc12641fcb33d88d7dd202/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T21:39:28,360 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:28,361 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:28,362 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:28,363 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:28,363 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:28,366 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:28,370 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T21:39:28,371 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 88d3dc3c67fc12641fcb33d88d7dd202; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75424705, jitterRate=0.12391568720340729}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T21:39:28,371 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:28,372 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 88d3dc3c67fc12641fcb33d88d7dd202: Running coprocessor pre-open hook at 1731188368353Writing region info on filesystem at 1731188368353Initializing all the Stores at 1731188368355 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731188368355Cleaning up temporary data from old regions at 1731188368363 (+8 ms)Running coprocessor post-open hooks at 1731188368371 (+8 ms)Region opened successfully at 1731188368372 (+1 ms) 2024-11-09T21:39:28,374 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., pid=9, masterSystemTime=1731188368343 2024-11-09T21:39:28,377 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. 2024-11-09T21:39:28,377 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. 2024-11-09T21:39:28,379 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=88d3dc3c67fc12641fcb33d88d7dd202, regionState=OPEN, openSeqNum=2, regionLocation=d86ff69d6719,45707,1731188364951 2024-11-09T21:39:28,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 88d3dc3c67fc12641fcb33d88d7dd202, server=d86ff69d6719,45707,1731188364951 because future has completed 2024-11-09T21:39:28,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-09T21:39:28,388 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 88d3dc3c67fc12641fcb33d88d7dd202, server=d86ff69d6719,45707,1731188364951 in 194 msec 2024-11-09T21:39:28,391 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-09T21:39:28,391 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=88d3dc3c67fc12641fcb33d88d7dd202, ASSIGN in 363 msec 2024-11-09T21:39:28,392 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T21:39:28,393 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188368393"}]},"ts":"1731188368393"} 2024-11-09T21:39:28,395 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=ENABLED in hbase:meta 2024-11-09T21:39:28,397 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T21:39:28,399 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin0 in 482 msec 2024-11-09T21:39:28,710 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T21:39:28,712 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin0 completed 2024-11-09T21:39:28,712 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin0 get assigned. Timeout = 60000ms 2024-11-09T21:39:28,714 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:28,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin0 assigned to meta. Checking AM states. 2024-11-09T21:39:28,723 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:28,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin0 assigned. 2024-11-09T21:39:28,724 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:28,727 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin0,, stopping at row=TestQuotaAdmin0 ,, for max=2147483647 with caching=100 2024-11-09T21:39:28,735 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin0,, stopping at row=TestQuotaAdmin0 ,, for max=2147483647 with caching=100 2024-11-09T21:39:28,763 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T21:39:28,763 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] client.AsyncConnectionImpl(321): The fetched master address is d86ff69d6719,32969,1731188364211 2024-11-09T21:39:28,763 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@f0a01a0 2024-11-09T21:39:28,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin1', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T21:39:28,767 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T21:39:28,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin1 2024-11-09T21:39:28,769 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33735, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=MasterService 2024-11-09T21:39:28,770 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T21:39:28,771 DEBUG [PEWorker-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:28,771 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin1" procId is: 10 2024-11-09T21:39:28,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-11-09T21:39:28,772 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T21:39:28,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741840_1016 (size=391) 2024-11-09T21:39:28,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741840_1016 (size=391) 2024-11-09T21:39:28,787 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2ca8c3dabfb91905eaecd57467637263, NAME => 'TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin1', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b 2024-11-09T21:39:28,795 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T21:39:28,795 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d86ff69d6719,45707,1731188364951, seqNum=-1] 2024-11-09T21:39:28,796 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T21:39:28,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741841_1017 (size=50) 2024-11-09T21:39:28,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741841_1017 (size=50) 2024-11-09T21:39:28,798 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42325, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-09T21:39:28,798 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:28,799 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1722): Closing 
2ca8c3dabfb91905eaecd57467637263, disabling compactions & flushes 2024-11-09T21:39:28,799 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. 2024-11-09T21:39:28,799 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. 2024-11-09T21:39:28,799 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. after waiting 0 ms 2024-11-09T21:39:28,799 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. 2024-11-09T21:39:28,799 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. 2024-11-09T21:39:28,799 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2ca8c3dabfb91905eaecd57467637263: Waiting for close lock at 1731188368799Disabling compacts and flushes for region at 1731188368799Disabling writes for close at 1731188368799Writing region close event to WAL at 1731188368799Closed at 1731188368799 2024-11-09T21:39:28,801 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T21:39:28,801 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1731188368801"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731188368801"}]},"ts":"1731188368801"} 2024-11-09T21:39:28,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.default', locateType=CURRENT is [region=hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae., hostname=d86ff69d6719,45593,1731188365056, seqNum=2] 2024-11-09T21:39:28,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T21:39:28,813 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54759, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-09T21:39:28,814 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
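The lookup of row 'n.default' in hbase:quota above is the quota subsystem reading namespace limits; user, table, and namespace quotas are all persisted as rows in that table. A sketch of how such rows are written and read through the public quota API (the 10 requests/second limit is an arbitrary example value):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class QuotaSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestQuotaAdmin0");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Throttle the table to 10 requests/second; this becomes a row in hbase:quota.
      admin.setQuota(
          QuotaSettingsFactory.throttleTable(tn, ThrottleType.REQUEST_NUMBER, 10, TimeUnit.SECONDS));

      // Read back whatever quotas are currently configured.
      for (QuotaSettings s : admin.getQuota(new QuotaFilter())) {
        System.out.println(s);
      }

      // Remove the throttle again.
      admin.setQuota(QuotaSettingsFactory.unthrottleTable(tn));
    }
  }
}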
2024-11-09T21:39:28,816 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T21:39:28,816 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188368816"}]},"ts":"1731188368816"} 2024-11-09T21:39:28,819 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=ENABLING in hbase:meta 2024-11-09T21:39:28,819 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {d86ff69d6719=0} racks are {/default-rack=0} 2024-11-09T21:39:28,821 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T21:39:28,821 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T21:39:28,821 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T21:39:28,821 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T21:39:28,821 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T21:39:28,821 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T21:39:28,821 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T21:39:28,821 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=2ca8c3dabfb91905eaecd57467637263, ASSIGN}] 2024-11-09T21:39:28,824 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=2ca8c3dabfb91905eaecd57467637263, ASSIGN 2024-11-09T21:39:28,825 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=2ca8c3dabfb91905eaecd57467637263, ASSIGN; state=OFFLINE, location=d86ff69d6719,45593,1731188365056; forceNewPlan=false, retain=false 2024-11-09T21:39:28,976 INFO [d86ff69d6719:32969 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-09T21:39:28,977 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=2ca8c3dabfb91905eaecd57467637263, regionState=OPENING, regionLocation=d86ff69d6719,45593,1731188365056 2024-11-09T21:39:28,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=2ca8c3dabfb91905eaecd57467637263, ASSIGN because future has completed 2024-11-09T21:39:28,986 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2ca8c3dabfb91905eaecd57467637263, server=d86ff69d6719,45593,1731188365056}] 2024-11-09T21:39:29,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-11-09T21:39:29,148 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. 2024-11-09T21:39:29,148 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 2ca8c3dabfb91905eaecd57467637263, NAME => 'TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263.', STARTKEY => '', ENDKEY => ''} 2024-11-09T21:39:29,149 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin1 2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:29,149 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:29,150 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:29,150 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:29,152 INFO [StoreOpener-2ca8c3dabfb91905eaecd57467637263-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:29,155 INFO [StoreOpener-2ca8c3dabfb91905eaecd57467637263-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2ca8c3dabfb91905eaecd57467637263 columnFamilyName cf 2024-11-09T21:39:29,155 DEBUG [StoreOpener-2ca8c3dabfb91905eaecd57467637263-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:29,156 INFO [StoreOpener-2ca8c3dabfb91905eaecd57467637263-1 {}] regionserver.HStore(327): Store=2ca8c3dabfb91905eaecd57467637263/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T21:39:29,156 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:29,157 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin1/2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:29,158 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin1/2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:29,158 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:29,158 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:29,161 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:29,164 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin1/2ca8c3dabfb91905eaecd57467637263/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T21:39:29,165 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 2ca8c3dabfb91905eaecd57467637263; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61745944, jitterRate=-0.07991373538970947}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T21:39:29,165 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:29,166 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 2ca8c3dabfb91905eaecd57467637263: Running coprocessor pre-open hook at 1731188369150Writing region info on filesystem at 1731188369150Initializing all the Stores at 1731188369152 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731188369152Cleaning up temporary data from old regions at 1731188369158 (+6 ms)Running coprocessor post-open hooks at 1731188369165 (+7 ms)Region opened successfully at 1731188369166 (+1 ms) 2024-11-09T21:39:29,167 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263., pid=12, masterSystemTime=1731188369140 2024-11-09T21:39:29,170 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. 2024-11-09T21:39:29,170 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. 2024-11-09T21:39:29,171 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=2ca8c3dabfb91905eaecd57467637263, regionState=OPEN, openSeqNum=2, regionLocation=d86ff69d6719,45593,1731188365056 2024-11-09T21:39:29,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2ca8c3dabfb91905eaecd57467637263, server=d86ff69d6719,45593,1731188365056 because future has completed 2024-11-09T21:39:29,180 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-09T21:39:29,181 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 2ca8c3dabfb91905eaecd57467637263, server=d86ff69d6719,45593,1731188365056 in 190 msec 2024-11-09T21:39:29,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-11-09T21:39:29,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=2ca8c3dabfb91905eaecd57467637263, ASSIGN in 359 msec 2024-11-09T21:39:29,184 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T21:39:29,185 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188369185"}]},"ts":"1731188369185"} 2024-11-09T21:39:29,187 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=ENABLED in hbase:meta 2024-11-09T21:39:29,189 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T21:39:29,192 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin1 in 422 msec 
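pid=10 has now walked the full CreateTableProcedure state machine (CREATE_TABLE_PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE, POST_OPERATION) in 422 msec. A blocking client call that would drive an equivalent procedure looks roughly like the sketch below; the column-family settings are copied from the descriptor echoed in the open journal above, while the class name and connection setup are illustrative, not the test's own code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateQuotaTable {
      public static void main(String[] args) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("TestQuotaAdmin1"))
            .setRegionReplication(1)                                  // REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)                                    // VERSIONS => '1'
                .setBloomFilterType(BloomType.NONE)                   // BLOOMFILTER => 'NONE'
                .build())
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master's CreateTableProcedure reaches SUCCESS, as in the log above.
          admin.createTable(desc);
        }
      }
    }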
2024-11-09T21:39:29,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-11-09T21:39:29,541 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin1 completed 2024-11-09T21:39:29,541 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin1 get assigned. Timeout = 60000ms 2024-11-09T21:39:29,542 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:29,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin1 assigned to meta. Checking AM states. 2024-11-09T21:39:29,550 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:29,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin1 assigned. 2024-11-09T21:39:29,550 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:29,553 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin1,, stopping at row=TestQuotaAdmin1 ,, for max=2147483647 with caching=100 2024-11-09T21:39:29,559 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin1,, stopping at row=TestQuotaAdmin1 ,, for max=2147483647 with caching=100 2024-11-09T21:39:29,563 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T21:39:29,564 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] client.AsyncConnectionImpl(321): The fetched master address is d86ff69d6719,32969,1731188364211 2024-11-09T21:39:29,564 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1e8a56b 2024-11-09T21:39:29,564 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T21:39:29,565 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55837, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=MasterService 2024-11-09T21:39:29,567 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin2', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T21:39:29,567 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T21:39:29,568 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=d86ff69d6719,45707,1731188364951, seqNum=-1] 2024-11-09T21:39:29,568 DEBUG [regionserver/d86ff69d6719:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication 
for service=ClientService, sasl=false 2024-11-09T21:39:29,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin2 2024-11-09T21:39:29,570 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32923, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-11-09T21:39:29,570 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T21:39:29,570 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:29,570 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin2" procId is: 13 2024-11-09T21:39:29,571 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T21:39:29,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-09T21:39:29,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.default', locateType=CURRENT is [region=hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae., hostname=d86ff69d6719,45593,1731188365056, seqNum=2] 2024-11-09T21:39:29,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T21:39:29,575 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51651, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-11-09T21:39:29,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741842_1018 (size=391) 2024-11-09T21:39:29,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741842_1018 (size=391) 2024-11-09T21:39:29,584 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a3e9bd3eed656037630723db27e2ece1, NAME => 'TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin2', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b 2024-11-09T21:39:29,592 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741843_1019 (size=50) 2024-11-09T21:39:29,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741843_1019 (size=50) 2024-11-09T21:39:29,593 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:29,593 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1722): Closing a3e9bd3eed656037630723db27e2ece1, disabling compactions & flushes 2024-11-09T21:39:29,593 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. 2024-11-09T21:39:29,593 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. 2024-11-09T21:39:29,593 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. after waiting 0 ms 2024-11-09T21:39:29,593 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. 2024-11-09T21:39:29,593 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. 2024-11-09T21:39:29,593 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1676): Region close journal for a3e9bd3eed656037630723db27e2ece1: Waiting for close lock at 1731188369593Disabling compacts and flushes for region at 1731188369593Disabling writes for close at 1731188369593Writing region close event to WAL at 1731188369593Closed at 1731188369593 2024-11-09T21:39:29,595 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T21:39:29,595 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1731188369595"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731188369595"}]},"ts":"1731188369595"} 2024-11-09T21:39:29,598 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
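The Put above is what "adding a region to meta" amounts to: one row per region in hbase:meta, keyed by 'table,startKey,regionId.encodedName.', carrying the serialized RegionInfo and the region state in the info family. Those rows can be read back with an ordinary client scan; the snippet below is an illustrative sketch only (the real client computes an exclusive stop row rather than checking the row prefix by hand, as the ClientMetaTableAccessor records elsewhere in this log show).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanMetaForTable {
      public static void main(String[] args) throws Exception {
        byte[] info = Bytes.toBytes("info");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(
                 new Scan().withStartRow(Bytes.toBytes("TestQuotaAdmin2,")).addFamily(info))) {
          for (Result r : scanner) {
            String row = Bytes.toString(r.getRow());
            if (!row.startsWith("TestQuotaAdmin2,")) {
              break;                                   // past the rows belonging to this table
            }
            byte[] state = r.getValue(info, Bytes.toBytes("state"));
            byte[] regioninfo = r.getValue(info, Bytes.toBytes("regioninfo"));
            System.out.println(row + " state=" + Bytes.toString(state)
                + " regioninfo bytes=" + (regioninfo == null ? 0 : regioninfo.length));
          }
        }
      }
    }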
2024-11-09T21:39:29,599 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T21:39:29,599 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188369599"}]},"ts":"1731188369599"} 2024-11-09T21:39:29,602 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=ENABLING in hbase:meta 2024-11-09T21:39:29,602 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {d86ff69d6719=0} racks are {/default-rack=0} 2024-11-09T21:39:29,603 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T21:39:29,604 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T21:39:29,604 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T21:39:29,604 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T21:39:29,604 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T21:39:29,604 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T21:39:29,604 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T21:39:29,604 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=a3e9bd3eed656037630723db27e2ece1, ASSIGN}] 2024-11-09T21:39:29,606 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=a3e9bd3eed656037630723db27e2ece1, ASSIGN 2024-11-09T21:39:29,607 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=a3e9bd3eed656037630723db27e2ece1, ASSIGN; state=OFFLINE, location=d86ff69d6719,45707,1731188364951; forceNewPlan=false, retain=false 2024-11-09T21:39:29,758 INFO [d86ff69d6719:32969 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
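Before handing out the assignment, the balancer snapshots the cluster state logged above: two region servers on a single host and rack, each showing zero regions in this snapshot. A rough client-side view of the same per-server region counts can be pulled from the cluster metrics; this is an illustrative sketch assuming a reachable cluster, and the output format is made up.

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class PrintRegionCounts {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          // One RegionMetrics entry per region currently open on each live server.
          metrics.getLiveServerMetrics().forEach((server, sm) ->
              System.out.println(server + " carries " + sm.getRegionMetrics().size() + " regions"));
        }
      }
    }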
2024-11-09T21:39:29,758 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=14 updating hbase:meta row=a3e9bd3eed656037630723db27e2ece1, regionState=OPENING, regionLocation=d86ff69d6719,45707,1731188364951 2024-11-09T21:39:29,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=a3e9bd3eed656037630723db27e2ece1, ASSIGN because future has completed 2024-11-09T21:39:29,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; OpenRegionProcedure a3e9bd3eed656037630723db27e2ece1, server=d86ff69d6719,45707,1731188364951}] 2024-11-09T21:39:29,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-09T21:39:29,928 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(132): Open TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. 2024-11-09T21:39:29,928 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7752): Opening region: {ENCODED => a3e9bd3eed656037630723db27e2ece1, NAME => 'TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1.', STARTKEY => '', ENDKEY => ''} 2024-11-09T21:39:29,928 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin2 a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:29,928 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(898): Instantiated TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:29,929 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7794): checking encryption for a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:29,929 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7797): checking classloading for a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:29,931 INFO [StoreOpener-a3e9bd3eed656037630723db27e2ece1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:29,933 INFO [StoreOpener-a3e9bd3eed656037630723db27e2ece1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a3e9bd3eed656037630723db27e2ece1 columnFamilyName cf 2024-11-09T21:39:29,933 DEBUG [StoreOpener-a3e9bd3eed656037630723db27e2ece1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:29,934 INFO [StoreOpener-a3e9bd3eed656037630723db27e2ece1-1 {}] regionserver.HStore(327): Store=a3e9bd3eed656037630723db27e2ece1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T21:39:29,934 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1038): replaying wal for a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:29,935 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin2/a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:29,936 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin2/a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:29,936 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1048): stopping wal replay for a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:29,937 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1060): Cleaning up temporary data for a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:29,940 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1093): writing seq id for a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:29,943 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin2/a3e9bd3eed656037630723db27e2ece1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T21:39:29,944 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1114): Opened a3e9bd3eed656037630723db27e2ece1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60515334, jitterRate=-0.09825125336647034}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T21:39:29,944 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:29,945 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1006): Region open journal for a3e9bd3eed656037630723db27e2ece1: Running coprocessor pre-open hook at 1731188369929Writing region info on filesystem at 1731188369929Initializing all the Stores at 1731188369930 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731188369930Cleaning up temporary data from old regions at 1731188369937 (+7 ms)Running coprocessor post-open hooks at 1731188369944 (+7 ms)Region opened successfully at 1731188369945 (+1 ms) 2024-11-09T21:39:29,947 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1., pid=15, masterSystemTime=1731188369920 2024-11-09T21:39:29,950 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. 2024-11-09T21:39:29,950 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. 2024-11-09T21:39:29,951 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=14 updating hbase:meta row=a3e9bd3eed656037630723db27e2ece1, regionState=OPEN, openSeqNum=2, regionLocation=d86ff69d6719,45707,1731188364951 2024-11-09T21:39:29,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=15, ppid=14, state=RUNNABLE, hasLock=false; OpenRegionProcedure a3e9bd3eed656037630723db27e2ece1, server=d86ff69d6719,45707,1731188364951 because future has completed 2024-11-09T21:39:29,958 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-11-09T21:39:29,958 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; OpenRegionProcedure a3e9bd3eed656037630723db27e2ece1, server=d86ff69d6719,45707,1731188364951 in 189 msec 2024-11-09T21:39:29,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-09T21:39:29,962 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=a3e9bd3eed656037630723db27e2ece1, ASSIGN in 354 msec 2024-11-09T21:39:29,963 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T21:39:29,963 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188369963"}]},"ts":"1731188369963"} 2024-11-09T21:39:29,966 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=ENABLED in hbase:meta 2024-11-09T21:39:29,967 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T21:39:29,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin2 in 400 msec 
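TestQuotaAdmin2 went through the same procedure in 400 msec. The surrounding records ("Checking to see if procedure is done pid=13", then the RawAsyncHBaseAdmin completion notice) are what the non-blocking client path looks like on the wire: the admin submits the procedure and polls the master until it reports done. A hedged sketch of that path through the public async API follows; the schema is abbreviated here and the class name is illustrative.

    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;

    public class CreateTableAsync {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestQuotaAdmin2");
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
            .setRegionReplication(1)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))  // family settings abbreviated;
                                                                      // see the blocking sketch earlier
            .build();
        try (AsyncConnection conn =
                 ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          AsyncAdmin admin = conn.getAdmin();
          CompletableFuture<Void> create = admin.createTable(desc);   // submits the CreateTableProcedure
          create.join();                                              // completes once the master reports it done
          System.out.println("created: " + admin.tableExists(name).join());
        }
      }
    }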
2024-11-09T21:39:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-09T21:39:30,340 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin2 completed 2024-11-09T21:39:30,340 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin2 get assigned. Timeout = 60000ms 2024-11-09T21:39:30,340 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:30,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin2 assigned to meta. Checking AM states. 2024-11-09T21:39:30,344 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:30,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin2 assigned. 2024-11-09T21:39:30,344 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:30,346 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin2,, stopping at row=TestQuotaAdmin2 ,, for max=2147483647 with caching=100 2024-11-09T21:39:30,350 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin2,, stopping at row=TestQuotaAdmin2 ,, for max=2147483647 with caching=100 2024-11-09T21:39:30,358 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$18(3529): Client=jenkins//172.17.0.2 creating {NAME => 'TestNs'} 2024-11-09T21:39:30,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=16, state=RUNNABLE:CREATE_NAMESPACE_PREPARE, hasLock=false; CreateNamespaceProcedure, namespace=TestNs 2024-11-09T21:39:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=16 2024-11-09T21:39:30,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, state=SUCCESS, hasLock=false; CreateNamespaceProcedure, namespace=TestNs in 14 msec 2024-11-09T21:39:30,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=16 2024-11-09T21:39:30,630 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$NamespaceProcedureBiConsumer(2745): Operation: CREATE_NAMESPACE, Namespace: TestNs completed 2024-11-09T21:39:30,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestNs:TestTable', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T21:39:30,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=17, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestNs:TestTable 2024-11-09T21:39:30,635 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, 
state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T21:39:30,635 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:30,636 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "TestNs" qualifier: "TestTable" procId is: 17 2024-11-09T21:39:30,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-11-09T21:39:30,638 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T21:39:30,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741844_1020 (size=358) 2024-11-09T21:39:30,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741844_1020 (size=358) 2024-11-09T21:39:30,650 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6b63665888c8e2240cbfb5b32190ca91, NAME => 'TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='TestNs:TestTable', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b 2024-11-09T21:39:30,650 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 963d0586451c5d763fb7c40b77e2f999, NAME => 'TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='TestNs:TestTable', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b 2024-11-09T21:39:30,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741845_1021 (size=44) 2024-11-09T21:39:30,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741846_1022 (size=44) 2024-11-09T21:39:30,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741846_1022 (size=44) 2024-11-09T21:39:30,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45239 is added to blk_1073741845_1021 (size=44) 2024-11-09T21:39:30,661 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(898): Instantiated TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:30,661 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(898): Instantiated TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:30,661 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1722): Closing 963d0586451c5d763fb7c40b77e2f999, disabling compactions & flushes 2024-11-09T21:39:30,661 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1755): Closing region TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. 2024-11-09T21:39:30,661 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1722): Closing 6b63665888c8e2240cbfb5b32190ca91, disabling compactions & flushes 2024-11-09T21:39:30,661 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. 2024-11-09T21:39:30,661 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1755): Closing region TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. 2024-11-09T21:39:30,661 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. after waiting 0 ms 2024-11-09T21:39:30,661 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. 2024-11-09T21:39:30,661 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. 2024-11-09T21:39:30,661 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. after waiting 0 ms 2024-11-09T21:39:30,661 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1973): Closed TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. 2024-11-09T21:39:30,661 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. 2024-11-09T21:39:30,661 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1973): Closed TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. 
2024-11-09T21:39:30,661 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1676): Region close journal for 963d0586451c5d763fb7c40b77e2f999: Waiting for close lock at 1731188370661Disabling compacts and flushes for region at 1731188370661Disabling writes for close at 1731188370661Writing region close event to WAL at 1731188370661Closed at 1731188370661 2024-11-09T21:39:30,661 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6b63665888c8e2240cbfb5b32190ca91: Waiting for close lock at 1731188370661Disabling compacts and flushes for region at 1731188370661Disabling writes for close at 1731188370661Writing region close event to WAL at 1731188370661Closed at 1731188370661 2024-11-09T21:39:30,663 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T21:39:30,663 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1731188370663"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731188370663"}]},"ts":"1731188370663"} 2024-11-09T21:39:30,663 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1731188370663"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731188370663"}]},"ts":"1731188370663"} 2024-11-09T21:39:30,669 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
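Unlike the two default-namespace tables, TestNs:TestTable is created inside a freshly created namespace and with a split point, which is why two regions (start keys '' and '1') were just added to meta. A client-side sketch of the equivalent calls is shown below; the namespace, table, family and split key are taken from the log, everything else is illustrative.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePreSplitTable {
      public static void main(String[] args) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("TestNs", "TestTable"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)                       // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)       // BLOOMFILTER => 'ROW'
                .build())
            .build();
        byte[][] splitKeys = { Bytes.toBytes("1") };     // yields regions (''..'1') and ('1'..''), as in the log
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createNamespace(NamespaceDescriptor.create("TestNs").build());
          admin.createTable(desc, splitKeys);
        }
      }
    }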
2024-11-09T21:39:30,671 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T21:39:30,671 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188370671"}]},"ts":"1731188370671"} 2024-11-09T21:39:30,674 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=ENABLING in hbase:meta 2024-11-09T21:39:30,674 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {d86ff69d6719=0} racks are {/default-rack=0} 2024-11-09T21:39:30,676 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T21:39:30,676 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T21:39:30,676 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T21:39:30,676 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T21:39:30,676 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T21:39:30,676 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T21:39:30,676 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T21:39:30,676 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=6b63665888c8e2240cbfb5b32190ca91, ASSIGN}, {pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=963d0586451c5d763fb7c40b77e2f999, ASSIGN}] 2024-11-09T21:39:30,678 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=6b63665888c8e2240cbfb5b32190ca91, ASSIGN 2024-11-09T21:39:30,678 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=963d0586451c5d763fb7c40b77e2f999, ASSIGN 2024-11-09T21:39:30,680 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=6b63665888c8e2240cbfb5b32190ca91, ASSIGN; state=OFFLINE, location=d86ff69d6719,45593,1731188365056; forceNewPlan=false, retain=false 2024-11-09T21:39:30,680 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=963d0586451c5d763fb7c40b77e2f999, ASSIGN; state=OFFLINE, location=d86ff69d6719,45707,1731188364951; forceNewPlan=false, retain=false 2024-11-09T21:39:30,830 INFO [d86ff69d6719:32969 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-09T21:39:30,831 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=18 updating hbase:meta row=6b63665888c8e2240cbfb5b32190ca91, regionState=OPENING, regionLocation=d86ff69d6719,45593,1731188365056 2024-11-09T21:39:30,831 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=963d0586451c5d763fb7c40b77e2f999, regionState=OPENING, regionLocation=d86ff69d6719,45707,1731188364951 2024-11-09T21:39:30,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=6b63665888c8e2240cbfb5b32190ca91, ASSIGN because future has completed 2024-11-09T21:39:30,838 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=18, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6b63665888c8e2240cbfb5b32190ca91, server=d86ff69d6719,45593,1731188365056}] 2024-11-09T21:39:30,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=963d0586451c5d763fb7c40b77e2f999, ASSIGN because future has completed 2024-11-09T21:39:30,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=21, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 963d0586451c5d763fb7c40b77e2f999, server=d86ff69d6719,45707,1731188364951}] 2024-11-09T21:39:30,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-11-09T21:39:31,004 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. 2024-11-09T21:39:31,005 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 6b63665888c8e2240cbfb5b32190ca91, NAME => 'TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91.', STARTKEY => '', ENDKEY => '1'} 2024-11-09T21:39:31,005 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestTable 6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:31,006 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:31,006 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:31,006 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:31,006 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(132): Open TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. 
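The two OpenRegionProcedures land on different region servers (ports 45593 and 45707 here), one per region of the pre-split table. Once the table is online, a client can see that layout through the region locator; this is a minimal sketch assuming the cluster from the log is reachable, with an illustrative class name and output format.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PrintRegionLocations {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestNs", "TestTable"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            RegionInfo region = loc.getRegion();
            System.out.println(region.getEncodedName()
                + " [" + Bytes.toStringBinary(region.getStartKey())
                + ", " + Bytes.toStringBinary(region.getEndKey()) + ")"
                + " -> " + loc.getServerName());
          }
        }
      }
    }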
2024-11-09T21:39:31,006 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7752): Opening region: {ENCODED => 963d0586451c5d763fb7c40b77e2f999, NAME => 'TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999.', STARTKEY => '1', ENDKEY => ''} 2024-11-09T21:39:31,007 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestTable 963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:31,007 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(898): Instantiated TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T21:39:31,007 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7794): checking encryption for 963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:31,007 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7797): checking classloading for 963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:31,016 INFO [StoreOpener-963d0586451c5d763fb7c40b77e2f999-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:31,017 INFO [StoreOpener-6b63665888c8e2240cbfb5b32190ca91-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:31,018 INFO [StoreOpener-963d0586451c5d763fb7c40b77e2f999-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 963d0586451c5d763fb7c40b77e2f999 columnFamilyName cf 2024-11-09T21:39:31,019 DEBUG [StoreOpener-963d0586451c5d763fb7c40b77e2f999-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:31,019 INFO [StoreOpener-6b63665888c8e2240cbfb5b32190ca91-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 
6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6b63665888c8e2240cbfb5b32190ca91 columnFamilyName cf 2024-11-09T21:39:31,019 DEBUG [StoreOpener-6b63665888c8e2240cbfb5b32190ca91-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T21:39:31,019 INFO [StoreOpener-963d0586451c5d763fb7c40b77e2f999-1 {}] regionserver.HStore(327): Store=963d0586451c5d763fb7c40b77e2f999/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T21:39:31,019 INFO [StoreOpener-6b63665888c8e2240cbfb5b32190ca91-1 {}] regionserver.HStore(327): Store=6b63665888c8e2240cbfb5b32190ca91/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T21:39:31,020 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1038): replaying wal for 963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:31,020 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:31,021 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:31,021 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:31,021 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:31,021 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:31,022 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1048): stopping wal replay for 963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:31,022 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:31,022 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:31,022 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1060): 
Cleaning up temporary data for 963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:31,024 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:31,024 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1093): writing seq id for 963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:31,027 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/963d0586451c5d763fb7c40b77e2f999/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T21:39:31,028 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/6b63665888c8e2240cbfb5b32190ca91/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T21:39:31,028 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 6b63665888c8e2240cbfb5b32190ca91; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67333609, jitterRate=0.0033489614725112915}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T21:39:31,028 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1114): Opened 963d0586451c5d763fb7c40b77e2f999; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74985358, jitterRate=0.1173689067363739}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T21:39:31,028 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:31,028 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:31,029 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1006): Region open journal for 963d0586451c5d763fb7c40b77e2f999: Running coprocessor pre-open hook at 1731188371014Writing region info on filesystem at 1731188371014Initializing all the Stores at 1731188371016 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731188371016Cleaning up temporary data from old regions at 1731188371022 (+6 ms)Running coprocessor post-open hooks at 1731188371028 (+6 ms)Region opened successfully at 1731188371029 (+1 ms) 2024-11-09T21:39:31,029 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 6b63665888c8e2240cbfb5b32190ca91: Running coprocessor pre-open 
hook at 1731188371014Writing region info on filesystem at 1731188371014Initializing all the Stores at 1731188371016 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731188371016Cleaning up temporary data from old regions at 1731188371022 (+6 ms)Running coprocessor post-open hooks at 1731188371028 (+6 ms)Region opened successfully at 1731188371029 (+1 ms) 2024-11-09T21:39:31,031 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91., pid=20, masterSystemTime=1731188370993 2024-11-09T21:39:31,031 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2236): Post open deploy tasks for TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., pid=21, masterSystemTime=1731188370995 2024-11-09T21:39:31,034 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. 2024-11-09T21:39:31,034 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. 2024-11-09T21:39:31,035 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=18 updating hbase:meta row=6b63665888c8e2240cbfb5b32190ca91, regionState=OPEN, openSeqNum=2, regionLocation=d86ff69d6719,45593,1731188365056 2024-11-09T21:39:31,035 DEBUG [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2266): Finished post open deploy task for TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. 2024-11-09T21:39:31,036 INFO [RS_OPEN_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(153): Opened TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. 
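For reference, the create-table request that these region-open and procedure entries are completing corresponds roughly to the client-side sketch below (a minimal sketch in Java, assuming a reachable mini cluster and the default client configuration on the classpath; the namespace, table name, 'cf' family with VERSIONS => '1', and the single split key '1' are taken from the log, everything else is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Namespace first, matching the TestNs:TestTable name seen in the log.
      admin.createNamespace(NamespaceDescriptor.create("TestNs").build());

      // One 'cf' family with a single version, matching the descriptor printed in the
      // region open journal above.
      TableDescriptorBuilder tdb = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestNs", "TestTable"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());

      // A single split key of "1" yields two regions, start keys "" and "1".
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(tdb.build(), splits);
    }
  }
}

The two regions opened above (start keys '' and '1') are exactly what one split key produces; the master then records the table as ENABLED in hbase:meta, which is the state change logged a few entries later.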
2024-11-09T21:39:31,037 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=963d0586451c5d763fb7c40b77e2f999, regionState=OPEN, openSeqNum=2, regionLocation=d86ff69d6719,45707,1731188364951
2024-11-09T21:39:31,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=18, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6b63665888c8e2240cbfb5b32190ca91, server=d86ff69d6719,45593,1731188365056 because future has completed
2024-11-09T21:39:31,041 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 963d0586451c5d763fb7c40b77e2f999, server=d86ff69d6719,45707,1731188364951 because future has completed
2024-11-09T21:39:31,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=18
2024-11-09T21:39:31,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=18, state=SUCCESS, hasLock=false; OpenRegionProcedure 6b63665888c8e2240cbfb5b32190ca91, server=d86ff69d6719,45593,1731188365056 in 202 msec
2024-11-09T21:39:31,045 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=19
2024-11-09T21:39:31,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, ppid=17, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=6b63665888c8e2240cbfb5b32190ca91, ASSIGN in 367 msec
2024-11-09T21:39:31,045 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 963d0586451c5d763fb7c40b77e2f999, server=d86ff69d6719,45707,1731188364951 in 201 msec
2024-11-09T21:39:31,049 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=17
2024-11-09T21:39:31,049 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=17, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=963d0586451c5d763fb7c40b77e2f999, ASSIGN in 369 msec
2024-11-09T21:39:31,050 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-09T21:39:31,050 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188371050"}]},"ts":"1731188371050"}
2024-11-09T21:39:31,053 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=ENABLED in hbase:meta
2024-11-09T21:39:31,054 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_POST_OPERATION
2024-11-09T21:39:31,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestNs:TestTable in 423 msec
2024-11-09T21:39:31,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17
2024-11-09T21:39:31,411 INFO [RPCClient-NioEventLoopGroup-5-4 {}]
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: TestNs:TestTable completed
2024-11-09T21:39:31,411 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestNs:TestTable get assigned. Timeout = 60000ms
2024-11-09T21:39:31,411 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-09T21:39:31,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestNs:TestTable assigned to meta. Checking AM states.
2024-11-09T21:39:31,419 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-09T21:39:31,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestNs:TestTable assigned.
2024-11-09T21:39:31,419 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1])
2024-11-09T21:39:31,421 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestNs:TestTable,, stopping at row=TestNs:TestTable ,, for max=2147483647 with caching=100
2024-11-09T21:39:31,428 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestNs:TestTable,, stopping at row=TestNs:TestTable ,, for max=2147483647 with caching=100
2024-11-09T21:39:31,450 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserTableClusterScopeQuota Thread=305, OpenFileDescriptor=549, MaxFileDescriptor=1048576, SystemLoadAverage=117, ProcessCount=11, AvailableMemoryMB=18230
2024-11-09T21:39:31,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='u.jenkins', locateType=CURRENT is [region=hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae., hostname=d86ff69d6719,45593,1731188365056, seqNum=2]
2024-11-09T21:39:31,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-09T21:39:31,464 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52969, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-09T21:39:31,761 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-09T21:39:31,761 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-09T21:39:31,761 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731191971509 bypass), TestNs=QuotaState(ts=1731191971509 bypass)}
2024-11-09T21:39:31,762 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731191971509 bypass), TestNs:TestTable=QuotaState(ts=1731191971509 bypass), TestQuotaAdmin2=QuotaState(ts=1731191971509 bypass)}
2024-11-09T21:39:31,762 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731191971509 [ TestNs:TestTable ])}
2024-11-09T21:39:31,762 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731191971509 bypass)}
2024-11-09T21:39:32,013 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-09T21:39:32,013 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279):
QuotaCache 2024-11-09T21:39:32,013 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731191971509 bypass), TestNs=QuotaState(ts=1731191971509 bypass)} 2024-11-09T21:39:32,013 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731191971509 bypass), TestQuotaAdmin1=QuotaState(ts=1731191971509 bypass)} 2024-11-09T21:39:32,014 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731191971509 [ TestNs:TestTable ])} 2024-11-09T21:39:32,014 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731191971509 bypass)} 2024-11-09T21:39:32,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-11-09T21:39:32,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Get size: 118 connection: 172.17.0.2:38160 deadline: 1731188382040, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-11-09T21:39:32,064 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 , the old value is region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:32,065 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at 
org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:32,065 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 because the exception is null or not the one we care about 2024-11-09T21:39:32,066 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:32,067 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=10 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-09T21:39:32.065Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:224) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:32,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-11-09T21:39:32,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Get size: 117 connection: 172.17.0.2:38160 deadline: 1731188382069, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-11-09T21:39:32,071 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 , the old value is region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:32,071 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:32,072 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 because the exception is null or not the one we care about 2024-11-09T21:39:32,072 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:32,072 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=0 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-09T21:39:32.072Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:224) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:32,334 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:32,334 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:32,334 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731195571509 bypass), TestNs=QuotaState(ts=1731195571509 bypass)} 2024-11-09T21:39:32,334 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731195571509 bypass), TestNs:TestTable=QuotaState(ts=1731195571509 bypass), TestQuotaAdmin2=QuotaState(ts=1731195571509 bypass)} 2024-11-09T21:39:32,334 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731195571509 bypass)} 2024-11-09T21:39:32,334 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731195571509 bypass)} 2024-11-09T21:39:32,541 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-09T21:39:32,585 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:32,585 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:32,585 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731195571509 bypass), TestNs=QuotaState(ts=1731195571509 bypass)} 2024-11-09T21:39:32,585 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731195571509 bypass), TestQuotaAdmin1=QuotaState(ts=1731195571509 bypass)} 2024-11-09T21:39:32,585 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731195571509 bypass)} 2024-11-09T21:39:32,585 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731195571509 bypass)} 
2024-11-09T21:39:32,591 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin0' 2024-11-09T21:39:32,593 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-09T21:39:32,594 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestNs:TestTable' 2024-11-09T21:39:32,594 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin2' 2024-11-09T21:39:32,595 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserTableClusterScopeQuota Thread=305 (was 305), OpenFileDescriptor=551 (was 549) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=117 (was 117), ProcessCount=11 (was 11), AvailableMemoryMB=18218 (was 18230) 2024-11-09T21:39:32,595 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin1' 2024-11-09T21:39:32,598 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:quota' 2024-11-09T21:39:32,605 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserNamespaceClusterScopeQuota Thread=305, OpenFileDescriptor=551, MaxFileDescriptor=1048576, SystemLoadAverage=117, ProcessCount=11, AvailableMemoryMB=18217 2024-11-09T21:39:32,870 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:32,871 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-11-09T21:39:33,121 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:33,121 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731199171609 bypass), TestNs=QuotaState(ts=1731199171609 bypass)} 2024-11-09T21:39:33,121 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731199171609 bypass), TestNs:TestTable=QuotaState(ts=1731199171609 bypass), TestQuotaAdmin2=QuotaState(ts=1731199171609 bypass)} 2024-11-09T21:39:33,121 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731199171609 [ default ])} 2024-11-09T21:39:33,121 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731199171609 bypass)} 2024-11-09T21:39:33,372 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:33,372 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-11-09T21:39:33,622 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:33,623 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731199171709 bypass), TestNs=QuotaState(ts=1731199171709 bypass)} 2024-11-09T21:39:33,623 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731199171709 bypass), TestQuotaAdmin1=QuotaState(ts=1731199171709 bypass)} 
2024-11-09T21:39:33,623 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731199171709 [ default ])} 2024-11-09T21:39:33,623 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731199171709 bypass)} 2024-11-09T21:39:33,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 12sec, 0ms 2024-11-09T21:39:33,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:38160 deadline: 1731188383636, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms 2024-11-09T21:39:33,638 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:33,638 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at 
org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:33,638 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 because the exception is null or not the one we care about 2024-11-09T21:39:33,639 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 12000000000ns which would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:33,640 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=5 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-09T21:39:33.639Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:199) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:33,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 10sec, 0ms 2024-11-09T21:39:33,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:38160 deadline: 1731188383662, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms 2024-11-09T21:39:33,664 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:33,664 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:33,664 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 because the exception is null or not the one we care about 2024-11-09T21:39:33,664 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:33,666 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-09T21:39:33.664Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:200) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:33,923 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:33,923 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:33,923 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731202771709 bypass), TestNs=QuotaState(ts=1731202771709 bypass)} 2024-11-09T21:39:33,923 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731202771709 bypass), TestNs:TestTable=QuotaState(ts=1731202771709 bypass), TestQuotaAdmin2=QuotaState(ts=1731202771709 bypass)} 2024-11-09T21:39:33,923 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731202771709 bypass)} 2024-11-09T21:39:33,923 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731202771709 bypass)} 2024-11-09T21:39:34,174 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:34,174 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:34,174 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731202771709 bypass), TestNs=QuotaState(ts=1731202771709 bypass)} 2024-11-09T21:39:34,174 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731202771709 bypass), TestQuotaAdmin1=QuotaState(ts=1731202771709 bypass)} 2024-11-09T21:39:34,174 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731202771709 bypass)} 2024-11-09T21:39:34,174 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731202771709 bypass)} 2024-11-09T21:39:34,189 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserNamespaceClusterScopeQuota Thread=305 (was 305), 
OpenFileDescriptor=551 (was 551), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=117 (was 117), ProcessCount=11 (was 11), AvailableMemoryMB=18196 (was 18217) 2024-11-09T21:39:34,203 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserClusterScopeQuota Thread=305, OpenFileDescriptor=551, MaxFileDescriptor=1048576, SystemLoadAverage=117, ProcessCount=11, AvailableMemoryMB=18195 2024-11-09T21:39:34,466 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:34,466 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-11-09T21:39:34,564 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-09T21:39:34,565 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-09T21:39:34,569 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin0 2024-11-09T21:39:34,569 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin0 Metrics about Tables on a single HBase RegionServer 2024-11-09T21:39:34,571 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin2 2024-11-09T21:39:34,571 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin2 Metrics about Tables on a single HBase RegionServer 2024-11-09T21:39:34,572 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_quota 2024-11-09T21:39:34,572 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_quota Metrics about Tables on a single HBase RegionServer 2024-11-09T21:39:34,573 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-09T21:39:34,573 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-09T21:39:34,574 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_TestNs_table_TestTable 2024-11-09T21:39:34,574 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_TestNs_table_TestTable Metrics about Tables on a single HBase RegionServer 2024-11-09T21:39:34,575 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.quotas.MasterQuotasObserver 2024-11-09T21:39:34,575 INFO 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.quotas.MasterQuotasObserver Metrics about HBase MasterObservers 2024-11-09T21:39:34,575 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T21:39:34,575 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-09T21:39:34,575 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin1 2024-11-09T21:39:34,575 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin1 Metrics about Tables on a single HBase RegionServer 2024-11-09T21:39:34,717 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:34,717 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731206371809 bypass), TestNs=QuotaState(ts=1731206371809 bypass)} 2024-11-09T21:39:34,717 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731206371809 bypass), TestNs:TestTable=QuotaState(ts=1731206371809 bypass), TestQuotaAdmin2=QuotaState(ts=1731206371809 bypass)} 2024-11-09T21:39:34,717 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731206371809 global-limiter)} 2024-11-09T21:39:34,717 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731206371809 bypass)} 2024-11-09T21:39:34,968 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:34,969 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-11-09T21:39:35,220 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:35,220 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731206371909 bypass), TestNs=QuotaState(ts=1731206371909 bypass)} 2024-11-09T21:39:35,220 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731206371909 bypass), TestQuotaAdmin1=QuotaState(ts=1731206371909 bypass)} 2024-11-09T21:39:35,220 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731206371909 global-limiter)} 2024-11-09T21:39:35,220 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731206371909 bypass)} 2024-11-09T21:39:35,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 10sec, 0ms 2024-11-09T21:39:35,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:38160 deadline: 1731188385246, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: 
number of write requests exceeded - wait 10sec, 0ms 2024-11-09T21:39:35,249 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:35,249 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:35,249 DEBUG 
[RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 because the exception is null or not the one we care about 2024-11-09T21:39:35,249 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] 
at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:35,250 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-09T21:39:35.249Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserClusterScopeQuota(TestClusterScopeQuotaThrottle.java:178) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:35,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 20sec, 0ms 2024-11-09T21:39:35,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:38160 deadline: 1731188385262, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms 2024-11-09T21:39:35,265 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:35,266 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:35,266 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 because the exception is null or not the one we care about 2024-11-09T21:39:35,266 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 20000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:35,267 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=3 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-09T21:39:35.266Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserClusterScopeQuota(TestClusterScopeQuotaThrottle.java:179) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:35,527 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:35,527 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:35,527 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731209971909 bypass), TestNs=QuotaState(ts=1731209971909 bypass)} 2024-11-09T21:39:35,527 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731209971909 bypass), TestNs:TestTable=QuotaState(ts=1731209971909 bypass), TestQuotaAdmin2=QuotaState(ts=1731209971909 bypass)} 2024-11-09T21:39:35,527 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731209971909 bypass)} 2024-11-09T21:39:35,528 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731209971909 bypass)} 2024-11-09T21:39:35,778 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:35,778 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:35,778 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731209971909 bypass), TestNs=QuotaState(ts=1731209971909 bypass)} 2024-11-09T21:39:35,779 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731209971909 bypass), TestQuotaAdmin1=QuotaState(ts=1731209971909 bypass)} 2024-11-09T21:39:35,779 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731209971909 bypass)} 2024-11-09T21:39:35,779 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731209971909 bypass)} 2024-11-09T21:39:35,792 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserClusterScopeQuota Thread=303 (was 305), 
OpenFileDescriptor=547 (was 551), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=117 (was 117), ProcessCount=11 (was 11), AvailableMemoryMB=18137 (was 18195) 2024-11-09T21:39:35,801 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testTableClusterScopeQuota Thread=303, OpenFileDescriptor=547, MaxFileDescriptor=1048576, SystemLoadAverage=117, ProcessCount=11, AvailableMemoryMB=18135 2024-11-09T21:39:36,058 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:36,058 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:36,058 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731213571909 bypass), TestNs=QuotaState(ts=1731213571909 bypass)} 2024-11-09T21:39:36,061 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731213571909 bypass), TestNs:TestTable=QuotaState(ts=1731213571909 TimeBasedLimiter( readReqs=AverageIntervalRateLimiter(avail=10 limit=10 tunit=3600000))), TestQuotaAdmin2=QuotaState(ts=1731213571909 bypass)} 2024-11-09T21:39:36,061 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-11-09T21:39:36,061 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731213571909 bypass)} 2024-11-09T21:39:36,312 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:36,312 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:36,312 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731213571909 bypass), TestNs=QuotaState(ts=1731213571909 bypass)} 2024-11-09T21:39:36,312 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731213571909 TimeBasedLimiter( readReqs=AverageIntervalRateLimiter(avail=10 limit=10 tunit=3600000))), TestQuotaAdmin1=QuotaState(ts=1731213571909 bypass)} 2024-11-09T21:39:36,312 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-11-09T21:39:36,312 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731213571909 bypass)} 2024-11-09T21:39:36,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-11-09T21:39:36,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Get size: 118 connection: 172.17.0.2:38160 deadline: 1731188386331, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-11-09T21:39:36,333 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 , the old value is region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at 
org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:36,334 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:36,334 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 because the exception is null or not the one we care about 2024-11-09T21:39:36,334 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:36,334 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=10 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-09T21:39:36.334Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:151) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:36,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-11-09T21:39:36,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Get size: 117 connection: 172.17.0.2:38160 deadline: 1731188386335, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-11-09T21:39:36,338 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 , the old value is region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:36,338 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:36,338 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 because the exception is null or not the one we care about 2024-11-09T21:39:36,338 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
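The entries above show the client-side backoff decision: the server rejects the Get with an RpcThrottlingException carrying a suggested wait of 6 minutes, and HBaseServerExceptionPauseManager throws instead of pausing because that wait would exceed the operation timeout. As a minimal sketch (not the test's own code) of how a caller might surface that information, assuming a cluster reachable from the default Configuration, the table name "TestNs:TestTable" taken from the log, a hypothetical row key, and that RpcThrottlingException exposes the server-suggested wait via getWaitInterval():

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.quotas.RpcThrottlingException;
import org.apache.hadoop.hbase.util.Bytes;

public class ThrottledGetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestNs:TestTable"))) {
      try {
        // "row-0" is a hypothetical key used only for illustration.
        Result r = table.get(new Get(Bytes.toBytes("row-0")));
        System.out.println("read ok: " + r);
      } catch (IOException e) {
        // The throttling error may arrive directly or wrapped (e.g. inside a
        // RetriesExhaustedException, as in the log above), so walk the cause chain.
        for (Throwable t = e; t != null; t = t.getCause()) {
          if (t instanceof RpcThrottlingException) {
            RpcThrottlingException rte = (RpcThrottlingException) t;
            System.err.println("throttled; server suggests waiting "
                + rte.getWaitInterval() + " ms before retrying");
            return;
          }
        }
        throw e;
      }
    }
  }
}

This mirrors the situation in the log: the synchronous TableOverAsyncTable.get wraps the throttling error in a RetriesExhaustedException after the retrying caller gives up, which is why the sketch inspects the cause chain rather than catching RpcThrottlingException directly.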
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:36,339 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=0 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-09T21:39:36.338Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:151) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:36,599 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:36,599 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:36,599 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731217171909 bypass), TestNs=QuotaState(ts=1731217171909 bypass)} 2024-11-09T21:39:36,599 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731217171909 bypass), TestNs:TestTable=QuotaState(ts=1731217171909 bypass), TestQuotaAdmin2=QuotaState(ts=1731217171909 bypass)} 2024-11-09T21:39:36,599 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731217171909 bypass)} 2024-11-09T21:39:36,599 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731217171909 bypass)} 2024-11-09T21:39:36,850 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:36,850 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:36,850 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731217171909 bypass), TestNs=QuotaState(ts=1731217171909 bypass)} 2024-11-09T21:39:36,851 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731217171909 bypass), TestQuotaAdmin1=QuotaState(ts=1731217171909 bypass)} 2024-11-09T21:39:36,851 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-11-09T21:39:36,851 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731217171909 bypass)} 2024-11-09T21:39:36,868 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testTableClusterScopeQuota Thread=303 (was 303), OpenFileDescriptor=547 (was 547), MaxFileDescriptor=1048576 
(was 1048576), SystemLoadAverage=132 (was 117) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=18133 (was 18135) 2024-11-09T21:39:36,876 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testNamespaceClusterScopeQuota Thread=303, OpenFileDescriptor=547, MaxFileDescriptor=1048576, SystemLoadAverage=132, ProcessCount=11, AvailableMemoryMB=18132 2024-11-09T21:39:37,138 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:37,139 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:37,139 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731220771909 TimeBasedLimiter( writeReqs=AverageIntervalRateLimiter(avail=5 limit=5 tunit=60000) readReqs=AverageIntervalRateLimiter(avail=6 limit=6 tunit=60000))), TestNs=QuotaState(ts=1731220771909 bypass)} 2024-11-09T21:39:37,139 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731220771909 bypass), TestNs:TestTable=QuotaState(ts=1731220771909 bypass), TestQuotaAdmin2=QuotaState(ts=1731220771909 bypass)} 2024-11-09T21:39:37,139 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-11-09T21:39:37,139 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731220771909 bypass)} 2024-11-09T21:39:37,390 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T21:39:37,390 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-09T21:39:37,390 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731220771909 TimeBasedLimiter( writeReqs=AverageIntervalRateLimiter(avail=5 limit=5 tunit=60000) readReqs=AverageIntervalRateLimiter(avail=6 limit=6 tunit=60000))), TestNs=QuotaState(ts=1731220771909 bypass)} 2024-11-09T21:39:37,390 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731220771909 bypass), TestQuotaAdmin1=QuotaState(ts=1731220771909 bypass)} 2024-11-09T21:39:37,390 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-11-09T21:39:37,390 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731220771909 bypass)} 2024-11-09T21:39:37,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 12sec, 0ms 2024-11-09T21:39:37,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:38160 deadline: 1731188387404, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms 2024-11-09T21:39:37,406 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded 
- wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:37,406 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-09T21:39:37,406 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 because the exception is null or not the one we care about 2024-11-09T21:39:37,407 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 12000000000ns which would exceed the timeout. We should throw instead. 
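The QuotaCache dump a few entries earlier shows the effective limiter for the "default" namespace: TimeBasedLimiter with AverageIntervalRateLimiter(avail=5 limit=5 tunit=60000) for writes and avail=6 limit=6 for reads, i.e. 5 writes and 6 reads per minute. That is what rejects the Mutate on TestQuotaAdmin0 with "number of write requests exceeded - wait 12sec, 0ms". A minimal sketch of configuring an equivalent throttle through the Admin quota API, assuming a reachable cluster and reusing the namespace and limits from the log (the cluster-scope handling exercised by the test is not reproduced here):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class NamespaceThrottleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // 5 write requests and 6 read requests per minute on the "default" namespace,
      // mirroring the limiter state printed in the QuotaCache dump above.
      admin.setQuota(QuotaSettingsFactory.throttleNamespace(
          "default", ThrottleType.WRITE_NUMBER, 5, TimeUnit.MINUTES));
      admin.setQuota(QuotaSettingsFactory.throttleNamespace(
          "default", ThrottleType.READ_NUMBER, 6, TimeUnit.MINUTES));
      // To clear the throttle again later:
      // admin.setQuota(QuotaSettingsFactory.unthrottleNamespace("default"));
    }
  }
}

New settings only take effect once the region servers refresh their QuotaCache (controlled by hbase.quota.refresh.period), which is why the test utility repeatedly waits for and dumps the cache state between operations, as seen in the ThrottleQuotaTestUtil entries above.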
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:37,407 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=5 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-09T21:39:37.406Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:128) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133)
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104)
    at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 32 more
2024-11-09T21:39:37,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 10sec, 0ms
2024-11-09T21:39:37,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45707 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:38160 deadline: 1731188387420, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms
2024-11-09T21:39:37,422 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133)
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99)
    at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-09T21:39:37,423 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133)
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99)
    at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-09T21:39:37,423 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., hostname=d86ff69d6719,45707,1731188364951, seqNum=-1 because the exception is null or not the one we care about
2024-11-09T21:39:37,423 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead.
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-09T21:39:37,424 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-09T21:39:37.423Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:129) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133)
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99)
    at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 32 more
2024-11-09T21:39:37,684 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-09T21:39:37,684 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-09T21:39:37,685 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731224371909 bypass), TestNs=QuotaState(ts=1731224371909 bypass)}
2024-11-09T21:39:37,685 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731224371909 bypass), TestNs:TestTable=QuotaState(ts=1731224371909 bypass), TestQuotaAdmin2=QuotaState(ts=1731224371909 bypass)}
2024-11-09T21:39:37,685 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731224371909 bypass)}
2024-11-09T21:39:37,685 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731224371909 bypass)}
2024-11-09T21:39:37,935 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-09T21:39:37,936 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-09T21:39:37,936 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731224371909 bypass), TestNs=QuotaState(ts=1731224371909 bypass)}
2024-11-09T21:39:37,936 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731224371909 bypass), TestQuotaAdmin1=QuotaState(ts=1731224371909 bypass)}
2024-11-09T21:39:37,936 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {}
2024-11-09T21:39:37,936 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731224371909 bypass)}
2024-11-09T21:39:37,948 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testNamespaceClusterScopeQuota Thread=303 (was 303), OpenFileDescriptor=547 (was 547), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=132 (was 132), ProcessCount=11 (was 11), AvailableMemoryMB=18153 (was 18132) - AvailableMemoryMB LEAK? -
2024-11-09T21:39:37,952 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin0
2024-11-09T21:39:37,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=22, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin0
2024-11-09T21:39:37,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22
2024-11-09T21:39:37,960 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188377959"}]},"ts":"1731188377959"}
2024-11-09T21:39:37,962 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=DISABLING in hbase:meta
2024-11-09T21:39:37,962 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin0 to state=DISABLING
2024-11-09T21:39:37,965 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin0}]
2024-11-09T21:39:37,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=88d3dc3c67fc12641fcb33d88d7dd202, UNASSIGN}]
2024-11-09T21:39:37,971 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=88d3dc3c67fc12641fcb33d88d7dd202, UNASSIGN
2024-11-09T21:39:37,973 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=88d3dc3c67fc12641fcb33d88d7dd202, regionState=CLOSING, regionLocation=d86ff69d6719,45707,1731188364951
2024-11-09T21:39:37,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=88d3dc3c67fc12641fcb33d88d7dd202, UNASSIGN because future has completed
2024-11-09T21:39:37,976 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-11-09T21:39:37,976 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE, hasLock=false; CloseRegionProcedure 88d3dc3c67fc12641fcb33d88d7dd202, server=d86ff69d6719,45707,1731188364951}]
2024-11-09T21:39:38,134 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(122): Close 88d3dc3c67fc12641fcb33d88d7dd202
2024-11-09T21:39:38,134 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false
2024-11-09T21:39:38,134 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}]
regionserver.HRegion(1722): Closing 88d3dc3c67fc12641fcb33d88d7dd202, disabling compactions & flushes 2024-11-09T21:39:38,135 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1755): Closing region TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. 2024-11-09T21:39:38,135 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. 2024-11-09T21:39:38,135 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. after waiting 0 ms 2024-11-09T21:39:38,135 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. 2024-11-09T21:39:38,139 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(2902): Flushing 88d3dc3c67fc12641fcb33d88d7dd202 1/1 column families, dataSize=578 B heapSize=2.11 KB 2024-11-09T21:39:38,192 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/.tmp/cf/e67245b0f0ee4649a06179e7408e546e is 38, key is row-0/cf:q/1731188377392/Put/seqid=0 2024-11-09T21:39:38,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741847_1023 (size=4967) 2024-11-09T21:39:38,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741847_1023 (size=4967) 2024-11-09T21:39:38,208 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=578 B at sequenceid=21 (bloomFilter=false), to=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/.tmp/cf/e67245b0f0ee4649a06179e7408e546e 2024-11-09T21:39:38,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-11-09T21:39:38,265 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/.tmp/cf/e67245b0f0ee4649a06179e7408e546e as hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/cf/e67245b0f0ee4649a06179e7408e546e 2024-11-09T21:39:38,276 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/cf/e67245b0f0ee4649a06179e7408e546e, entries=6, sequenceid=21, 
filesize=4.9 K 2024-11-09T21:39:38,285 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(3140): Finished flush of dataSize ~578 B/578, heapSize ~2.09 KB/2144, currentSize=0 B/0 for 88d3dc3c67fc12641fcb33d88d7dd202 in 145ms, sequenceid=21, compaction requested=false 2024-11-09T21:39:38,293 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-09T21:39:38,296 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1973): Closed TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. 2024-11-09T21:39:38,296 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1676): Region close journal for 88d3dc3c67fc12641fcb33d88d7dd202: Waiting for close lock at 1731188378134Running coprocessor pre-close hooks at 1731188378134Disabling compacts and flushes for region at 1731188378134Disabling writes for close at 1731188378135 (+1 ms)Obtaining lock to block concurrent updates at 1731188378140 (+5 ms)Preparing flush snapshotting stores in 88d3dc3c67fc12641fcb33d88d7dd202 at 1731188378140Finished memstore snapshotting TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202., syncing WAL and waiting on mvcc, flushsize=dataSize=578, getHeapSize=2144, getOffHeapSize=0, getCellsCount=17 at 1731188378149 (+9 ms)Flushing stores of TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202. at 1731188378150 (+1 ms)Flushing 88d3dc3c67fc12641fcb33d88d7dd202/cf: creating writer at 1731188378153 (+3 ms)Flushing 88d3dc3c67fc12641fcb33d88d7dd202/cf: appending metadata at 1731188378186 (+33 ms)Flushing 88d3dc3c67fc12641fcb33d88d7dd202/cf: closing flushed file at 1731188378188 (+2 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a5901d2: reopening flushed file at 1731188378263 (+75 ms)Finished flush of dataSize ~578 B/578, heapSize ~2.09 KB/2144, currentSize=0 B/0 for 88d3dc3c67fc12641fcb33d88d7dd202 in 145ms, sequenceid=21, compaction requested=false at 1731188378286 (+23 ms)Writing region close event to WAL at 1731188378288 (+2 ms)Running coprocessor post-close hooks at 1731188378294 (+6 ms)Closed at 1731188378296 (+2 ms) 2024-11-09T21:39:38,299 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(157): Closed 88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:38,300 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=88d3dc3c67fc12641fcb33d88d7dd202, regionState=CLOSED 2024-11-09T21:39:38,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=24, state=RUNNABLE, hasLock=false; CloseRegionProcedure 88d3dc3c67fc12641fcb33d88d7dd202, server=d86ff69d6719,45707,1731188364951 because future has completed 2024-11-09T21:39:38,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=24 2024-11-09T21:39:38,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=24, state=SUCCESS, hasLock=false; CloseRegionProcedure 88d3dc3c67fc12641fcb33d88d7dd202, 
server=d86ff69d6719,45707,1731188364951 in 328 msec 2024-11-09T21:39:38,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=24, resume processing ppid=23 2024-11-09T21:39:38,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=23, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=88d3dc3c67fc12641fcb33d88d7dd202, UNASSIGN in 338 msec 2024-11-09T21:39:38,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-11-09T21:39:38,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin0 in 348 msec 2024-11-09T21:39:38,317 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188378317"}]},"ts":"1731188378317"} 2024-11-09T21:39:38,323 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=DISABLED in hbase:meta 2024-11-09T21:39:38,323 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin0 to state=DISABLED 2024-11-09T21:39:38,328 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin0 in 372 msec 2024-11-09T21:39:38,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-11-09T21:39:38,730 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin0 completed 2024-11-09T21:39:38,736 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin0 2024-11-09T21:39:38,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=26, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-09T21:39:38,742 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=26, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-09T21:39:38,744 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=26, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-09T21:39:38,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=26 2024-11-09T21:39:38,749 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:38,753 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/cf, FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/recovered.edits] 2024-11-09T21:39:38,763 DEBUG [HFileArchiver-1 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/cf/e67245b0f0ee4649a06179e7408e546e to hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/archive/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/cf/e67245b0f0ee4649a06179e7408e546e 2024-11-09T21:39:38,767 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/recovered.edits/24.seqid to hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/archive/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202/recovered.edits/24.seqid 2024-11-09T21:39:38,768 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin0/88d3dc3c67fc12641fcb33d88d7dd202 2024-11-09T21:39:38,768 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin0 regions 2024-11-09T21:39:38,774 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=26, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-09T21:39:38,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45707 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-09T21:39:38,782 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin0 from hbase:meta 2024-11-09T21:39:38,785 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin0' descriptor. 2024-11-09T21:39:38,787 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=26, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-09T21:39:38,787 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin0' from region states. 2024-11-09T21:39:38,787 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731188378787"}]},"ts":"9223372036854775807"} 2024-11-09T21:39:38,790 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-09T21:39:38,790 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 88d3dc3c67fc12641fcb33d88d7dd202, NAME => 'TestQuotaAdmin0,,1731188367908.88d3dc3c67fc12641fcb33d88d7dd202.', STARTKEY => '', ENDKEY => ''}] 2024-11-09T21:39:38,790 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin0' as deleted. 
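For reference, a minimal client-side sketch (not taken from this run) of the disable-then-delete sequence that produces the master.HMaster "disable TestQuotaAdmin0" / "delete TestQuotaAdmin0" requests and the MasterRpcServices "Checking to see if procedure is done" polling seen above. It assumes a reachable cluster configuration on the classpath and uses only the standard synchronous Admin API; the table name is the one from the log, everything else is illustrative.

```java
// Illustrative sketch only (not part of this log): synchronous disable + delete of the
// TestQuotaAdmin0 table via the HBase Admin API. Assumes hbase-site.xml on the classpath
// points at the cluster whose master logged the requests above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestQuotaAdmin0 {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestQuotaAdmin0"); // default namespace
      if (admin.tableExists(table)) {
        admin.disableTable(table); // master runs a DisableTableProcedure and closes the region
        admin.deleteTable(table);  // master runs a DeleteTableProcedure and archives the files
      }
    }
  }
}
```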
2024-11-09T21:39:38,791 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731188378790"}]},"ts":"9223372036854775807"} 2024-11-09T21:39:38,793 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin0 state from META 2024-11-09T21:39:38,794 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=26, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-09T21:39:38,796 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin0 in 57 msec 2024-11-09T21:39:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=26 2024-11-09T21:39:39,011 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin0 2024-11-09T21:39:39,012 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin0 completed 2024-11-09T21:39:39,013 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin1 2024-11-09T21:39:39,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=27, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin1 2024-11-09T21:39:39,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=27 2024-11-09T21:39:39,022 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188379022"}]},"ts":"1731188379022"} 2024-11-09T21:39:39,025 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=DISABLING in hbase:meta 2024-11-09T21:39:39,025 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin1 to state=DISABLING 2024-11-09T21:39:39,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin1}] 2024-11-09T21:39:39,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=2ca8c3dabfb91905eaecd57467637263, UNASSIGN}] 2024-11-09T21:39:39,029 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=2ca8c3dabfb91905eaecd57467637263, UNASSIGN 2024-11-09T21:39:39,030 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=29 updating hbase:meta row=2ca8c3dabfb91905eaecd57467637263, regionState=CLOSING, regionLocation=d86ff69d6719,45593,1731188365056 2024-11-09T21:39:39,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=2ca8c3dabfb91905eaecd57467637263, UNASSIGN because future has completed 2024-11-09T21:39:39,033 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-09T21:39:39,033 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=30, ppid=29, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2ca8c3dabfb91905eaecd57467637263, server=d86ff69d6719,45593,1731188365056}] 2024-11-09T21:39:39,186 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(122): Close 2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:39,186 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-09T21:39:39,186 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1722): Closing 2ca8c3dabfb91905eaecd57467637263, disabling compactions & flushes 2024-11-09T21:39:39,186 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1755): Closing region TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. 2024-11-09T21:39:39,186 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. 2024-11-09T21:39:39,186 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. after waiting 0 ms 2024-11-09T21:39:39,187 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. 2024-11-09T21:39:39,192 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin1/2ca8c3dabfb91905eaecd57467637263/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-09T21:39:39,193 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1973): Closed TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263. 
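The "client.RawAsyncHBaseAdmin$TableProcedureBiConsumer ... Operation: DISABLE ... completed" lines in this log come from the asynchronous client path. Below is a hedged sketch of that path using only the public AsyncAdmin API; the class and method names are standard HBase client types, the table name is taken from the log, and the rest is illustrative.

```java
// Sketch, assuming the standard asynchronous HBase client: each future completes once the
// master reports the corresponding procedure (DisableTableProcedure / DeleteTableProcedure)
// as finished, which is when the "Operation: ... completed" lines above are emitted.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncDropTable {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn =
             ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      AsyncAdmin admin = conn.getAdmin();
      TableName table = TableName.valueOf("TestQuotaAdmin1");
      admin.disableTable(table).join(); // completes when the disable procedure is SUCCESS
      admin.deleteTable(table).join();  // completes when the delete procedure is SUCCESS
    }
  }
}
```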
2024-11-09T21:39:39,193 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1676): Region close journal for 2ca8c3dabfb91905eaecd57467637263: Waiting for close lock at 1731188379186Running coprocessor pre-close hooks at 1731188379186Disabling compacts and flushes for region at 1731188379186Disabling writes for close at 1731188379186Writing region close event to WAL at 1731188379187 (+1 ms)Running coprocessor post-close hooks at 1731188379193 (+6 ms)Closed at 1731188379193 2024-11-09T21:39:39,196 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(157): Closed 2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:39,197 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=29 updating hbase:meta row=2ca8c3dabfb91905eaecd57467637263, regionState=CLOSED 2024-11-09T21:39:39,199 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=30, ppid=29, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2ca8c3dabfb91905eaecd57467637263, server=d86ff69d6719,45593,1731188365056 because future has completed 2024-11-09T21:39:39,204 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=29 2024-11-09T21:39:39,205 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=29, state=SUCCESS, hasLock=false; CloseRegionProcedure 2ca8c3dabfb91905eaecd57467637263, server=d86ff69d6719,45593,1731188365056 in 168 msec 2024-11-09T21:39:39,207 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=28 2024-11-09T21:39:39,207 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=2ca8c3dabfb91905eaecd57467637263, UNASSIGN in 176 msec 2024-11-09T21:39:39,210 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=27 2024-11-09T21:39:39,210 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=27, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin1 in 182 msec 2024-11-09T21:39:39,212 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188379212"}]},"ts":"1731188379212"} 2024-11-09T21:39:39,214 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=DISABLED in hbase:meta 2024-11-09T21:39:39,214 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin1 to state=DISABLED 2024-11-09T21:39:39,216 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin1 in 201 msec 2024-11-09T21:39:39,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=27 2024-11-09T21:39:39,280 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin1 completed 2024-11-09T21:39:39,280 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin1 2024-11-09T21:39:39,282 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-09T21:39:39,284 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=31, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-09T21:39:39,285 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=31, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-09T21:39:39,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-09T21:39:39,290 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin1/2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:39,293 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin1/2ca8c3dabfb91905eaecd57467637263/cf, FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin1/2ca8c3dabfb91905eaecd57467637263/recovered.edits] 2024-11-09T21:39:39,301 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin1/2ca8c3dabfb91905eaecd57467637263/recovered.edits/4.seqid to hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/archive/data/default/TestQuotaAdmin1/2ca8c3dabfb91905eaecd57467637263/recovered.edits/4.seqid 2024-11-09T21:39:39,302 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin1/2ca8c3dabfb91905eaecd57467637263 2024-11-09T21:39:39,302 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin1 regions 2024-11-09T21:39:39,305 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=31, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-09T21:39:39,308 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin1 from hbase:meta 2024-11-09T21:39:39,310 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin1' descriptor. 2024-11-09T21:39:39,312 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=31, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-09T21:39:39,312 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin1' from region states. 
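As the "Archived from FileablePath ... to .../archive/data/..." lines show, DeleteTableProcedure does not destroy region files outright; HFileArchiver moves them under the cluster's archive/data tree. A small sketch for inspecting such an archive directory with the Hadoop FileSystem API follows; the archiveDir path is a hypothetical stand-in, not the test-data path from this run.

```java
// Sketch: list whatever HFileArchiver left under the archive tree for a deleted table.
// The path below is a hypothetical example; in this run the equivalent location is the
// .../archive/data/default/TestQuotaAdmin1/... directory shown in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedRegionFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // picks up core-site.xml / hdfs-site.xml
    Path archiveDir =
        new Path("hdfs://namenode:8020/hbase/archive/data/default/TestQuotaAdmin1");
    FileSystem fs = archiveDir.getFileSystem(conf);
    for (FileStatus region : fs.listStatus(archiveDir)) {
      System.out.println(region.getPath()); // one entry per archived region directory
    }
  }
}
```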
2024-11-09T21:39:39,312 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731188379312"}]},"ts":"9223372036854775807"} 2024-11-09T21:39:39,315 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-09T21:39:39,315 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 2ca8c3dabfb91905eaecd57467637263, NAME => 'TestQuotaAdmin1,,1731188368766.2ca8c3dabfb91905eaecd57467637263.', STARTKEY => '', ENDKEY => ''}] 2024-11-09T21:39:39,315 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin1' as deleted. 2024-11-09T21:39:39,315 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731188379315"}]},"ts":"9223372036854775807"} 2024-11-09T21:39:39,318 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin1 state from META 2024-11-09T21:39:39,319 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=31, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-09T21:39:39,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin1 in 39 msec 2024-11-09T21:39:39,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-09T21:39:39,549 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin1 2024-11-09T21:39:39,549 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin1 completed 2024-11-09T21:39:39,550 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin2 2024-11-09T21:39:39,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin2 2024-11-09T21:39:39,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=32 2024-11-09T21:39:39,556 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188379555"}]},"ts":"1731188379555"} 2024-11-09T21:39:39,559 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=DISABLING in hbase:meta 2024-11-09T21:39:39,559 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin2 to state=DISABLING 2024-11-09T21:39:39,560 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin2}] 2024-11-09T21:39:39,563 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=TestQuotaAdmin2, region=a3e9bd3eed656037630723db27e2ece1, UNASSIGN}] 2024-11-09T21:39:39,564 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=a3e9bd3eed656037630723db27e2ece1, UNASSIGN 2024-11-09T21:39:39,565 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=a3e9bd3eed656037630723db27e2ece1, regionState=CLOSING, regionLocation=d86ff69d6719,45707,1731188364951 2024-11-09T21:39:39,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=a3e9bd3eed656037630723db27e2ece1, UNASSIGN because future has completed 2024-11-09T21:39:39,568 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-09T21:39:39,568 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure a3e9bd3eed656037630723db27e2ece1, server=d86ff69d6719,45707,1731188364951}] 2024-11-09T21:39:39,721 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:39,721 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-09T21:39:39,722 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing a3e9bd3eed656037630723db27e2ece1, disabling compactions & flushes 2024-11-09T21:39:39,722 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. 2024-11-09T21:39:39,722 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. 2024-11-09T21:39:39,722 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. after waiting 0 ms 2024-11-09T21:39:39,722 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. 2024-11-09T21:39:39,728 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin2/a3e9bd3eed656037630723db27e2ece1/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-09T21:39:39,730 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1. 
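The hbase:meta updates above record the DISABLING then DISABLED state for each table before its DeleteTableProcedure is accepted. A minimal sketch of a client-side helper that respects that ordering, assuming an already-open Admin handle (the helper name and structure are illustrative, not from the test):

```java
// Sketch, not from the test: drop a table only after confirming it is DISABLED, mirroring the
// DISABLING -> DISABLED transitions written to hbase:meta in the log above.
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class SafeDrop {
  public static void dropIfPresent(Admin admin, String name) throws IOException {
    TableName table = TableName.valueOf(name);
    if (!admin.tableExists(table)) {
      return;                        // nothing to do
    }
    if (!admin.isTableDisabled(table)) {
      admin.disableTable(table);     // blocks until the DisableTableProcedure reports SUCCESS
    }
    admin.deleteTable(table);        // a table must be disabled before deletion is accepted
  }
}
```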
2024-11-09T21:39:39,730 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for a3e9bd3eed656037630723db27e2ece1: Waiting for close lock at 1731188379722Running coprocessor pre-close hooks at 1731188379722Disabling compacts and flushes for region at 1731188379722Disabling writes for close at 1731188379722Writing region close event to WAL at 1731188379722Running coprocessor post-close hooks at 1731188379730 (+8 ms)Closed at 1731188379730 2024-11-09T21:39:39,733 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:39,734 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=a3e9bd3eed656037630723db27e2ece1, regionState=CLOSED 2024-11-09T21:39:39,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure a3e9bd3eed656037630723db27e2ece1, server=d86ff69d6719,45707,1731188364951 because future has completed 2024-11-09T21:39:39,741 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-11-09T21:39:39,741 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure a3e9bd3eed656037630723db27e2ece1, server=d86ff69d6719,45707,1731188364951 in 170 msec 2024-11-09T21:39:39,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=33 2024-11-09T21:39:39,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=a3e9bd3eed656037630723db27e2ece1, UNASSIGN in 178 msec 2024-11-09T21:39:39,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=33, resume processing ppid=32 2024-11-09T21:39:39,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin2 in 190 msec 2024-11-09T21:39:39,757 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188379756"}]},"ts":"1731188379756"} 2024-11-09T21:39:39,759 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=DISABLED in hbase:meta 2024-11-09T21:39:39,759 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin2 to state=DISABLED 2024-11-09T21:39:39,767 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin2 in 211 msec 2024-11-09T21:39:39,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=32 2024-11-09T21:39:39,809 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin2 completed 2024-11-09T21:39:39,810 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin2 2024-11-09T21:39:39,811 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-09T21:39:39,813 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-09T21:39:39,814 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-09T21:39:39,817 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin2/a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:39,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-09T21:39:39,820 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin2/a3e9bd3eed656037630723db27e2ece1/cf, FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin2/a3e9bd3eed656037630723db27e2ece1/recovered.edits] 2024-11-09T21:39:39,828 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin2/a3e9bd3eed656037630723db27e2ece1/recovered.edits/4.seqid to hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/archive/data/default/TestQuotaAdmin2/a3e9bd3eed656037630723db27e2ece1/recovered.edits/4.seqid 2024-11-09T21:39:39,828 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/default/TestQuotaAdmin2/a3e9bd3eed656037630723db27e2ece1 2024-11-09T21:39:39,829 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin2 regions 2024-11-09T21:39:39,832 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-09T21:39:39,834 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin2 from hbase:meta 2024-11-09T21:39:39,837 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin2' descriptor. 2024-11-09T21:39:39,839 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-09T21:39:39,839 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin2' from region states. 
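The next table the log tears down, TestNs:TestTable, lives in a non-default namespace, which is why its name carries the "TestNs:" prefix throughout. A short illustrative sketch of how the two naming forms resolve to the same TableName (output comments are assumptions about what would be printed):

```java
// Sketch: the two equivalent ways to name the namespaced table the log disables next.
// Tables without a namespace prefix (like TestQuotaAdmin0 above) land in "default".
import org.apache.hadoop.hbase.TableName;

public class NamespacedNames {
  public static void main(String[] args) {
    TableName a = TableName.valueOf("TestNs:TestTable");    // namespace:qualifier form
    TableName b = TableName.valueOf("TestNs", "TestTable"); // explicit namespace + qualifier
    System.out.println(a.equals(b));                        // true
    System.out.println(a.getNamespaceAsString());           // TestNs
    System.out.println(a.getQualifierAsString());           // TestTable
  }
}
```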
2024-11-09T21:39:39,839 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731188379839"}]},"ts":"9223372036854775807"} 2024-11-09T21:39:39,843 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-09T21:39:39,843 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => a3e9bd3eed656037630723db27e2ece1, NAME => 'TestQuotaAdmin2,,1731188369566.a3e9bd3eed656037630723db27e2ece1.', STARTKEY => '', ENDKEY => ''}] 2024-11-09T21:39:39,843 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin2' as deleted. 2024-11-09T21:39:39,844 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731188379844"}]},"ts":"9223372036854775807"} 2024-11-09T21:39:39,848 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin2 state from META 2024-11-09T21:39:39,849 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-09T21:39:39,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin2 in 39 msec 2024-11-09T21:39:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-09T21:39:40,080 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin2 2024-11-09T21:39:40,081 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin2 completed 2024-11-09T21:39:40,082 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestNs:TestTable 2024-11-09T21:39:40,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestNs:TestTable 2024-11-09T21:39:40,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-09T21:39:40,091 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188380090"}]},"ts":"1731188380090"} 2024-11-09T21:39:40,093 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=DISABLING in hbase:meta 2024-11-09T21:39:40,093 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestNs:TestTable to state=DISABLING 2024-11-09T21:39:40,094 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestNs:TestTable}] 2024-11-09T21:39:40,096 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=TestNs:TestTable, region=6b63665888c8e2240cbfb5b32190ca91, UNASSIGN}, {pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=963d0586451c5d763fb7c40b77e2f999, UNASSIGN}] 2024-11-09T21:39:40,097 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=6b63665888c8e2240cbfb5b32190ca91, UNASSIGN 2024-11-09T21:39:40,097 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=963d0586451c5d763fb7c40b77e2f999, UNASSIGN 2024-11-09T21:39:40,098 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=963d0586451c5d763fb7c40b77e2f999, regionState=CLOSING, regionLocation=d86ff69d6719,45707,1731188364951 2024-11-09T21:39:40,098 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=39 updating hbase:meta row=6b63665888c8e2240cbfb5b32190ca91, regionState=CLOSING, regionLocation=d86ff69d6719,45593,1731188365056 2024-11-09T21:39:40,100 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=6b63665888c8e2240cbfb5b32190ca91, UNASSIGN because future has completed 2024-11-09T21:39:40,100 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-09T21:39:40,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=41, ppid=39, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6b63665888c8e2240cbfb5b32190ca91, server=d86ff69d6719,45593,1731188365056}] 2024-11-09T21:39:40,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=963d0586451c5d763fb7c40b77e2f999, UNASSIGN because future has completed 2024-11-09T21:39:40,102 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-09T21:39:40,102 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 963d0586451c5d763fb7c40b77e2f999, server=d86ff69d6719,45707,1731188364951}] 2024-11-09T21:39:40,255 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(122): Close 6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:40,255 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-09T21:39:40,256 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:40,256 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split 
region: false: evictCache: false 2024-11-09T21:39:40,259 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 963d0586451c5d763fb7c40b77e2f999, disabling compactions & flushes 2024-11-09T21:39:40,259 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1722): Closing 6b63665888c8e2240cbfb5b32190ca91, disabling compactions & flushes 2024-11-09T21:39:40,259 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1755): Closing region TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. 2024-11-09T21:39:40,259 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. 2024-11-09T21:39:40,259 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. 2024-11-09T21:39:40,259 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. 2024-11-09T21:39:40,259 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. after waiting 0 ms 2024-11-09T21:39:40,260 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. after waiting 0 ms 2024-11-09T21:39:40,260 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. 2024-11-09T21:39:40,260 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. 2024-11-09T21:39:40,266 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/963d0586451c5d763fb7c40b77e2f999/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-09T21:39:40,266 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/6b63665888c8e2240cbfb5b32190ca91/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-09T21:39:40,267 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999. 
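TestNs:TestTable is closed as two regions because it was created pre-split at row key '1' with a single 'cf' family (the STARTKEY/ENDKEY pairs removed from META further down confirm the boundaries). A hedged sketch of how such a pre-split table can be created with the standard descriptor builders; the table, namespace, family, and split key come from the log, while the helper itself is illustrative and assumes the TestNs namespace does not already exist.

```java
// Sketch: create a two-region table split at "1", matching the region boundaries
// (STARTKEY ''..'1' and '1'..'') that the delete below removes from hbase:meta.
import java.io.IOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateSplitTable {
  public static void create(Admin admin) throws IOException {
    admin.createNamespace(NamespaceDescriptor.create("TestNs").build()); // assumes it is new
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestNs", "TestTable"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    byte[][] splitKeys = { Bytes.toBytes("1") };  // one split key => two regions
    admin.createTable(desc, splitKeys);
  }
}
```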
2024-11-09T21:39:40,267 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 963d0586451c5d763fb7c40b77e2f999: Waiting for close lock at 1731188380258Running coprocessor pre-close hooks at 1731188380259 (+1 ms)Disabling compacts and flushes for region at 1731188380259Disabling writes for close at 1731188380260 (+1 ms)Writing region close event to WAL at 1731188380260Running coprocessor post-close hooks at 1731188380267 (+7 ms)Closed at 1731188380267 2024-11-09T21:39:40,267 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1973): Closed TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91. 2024-11-09T21:39:40,267 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1676): Region close journal for 6b63665888c8e2240cbfb5b32190ca91: Waiting for close lock at 1731188380259Running coprocessor pre-close hooks at 1731188380259Disabling compacts and flushes for region at 1731188380259Disabling writes for close at 1731188380260 (+1 ms)Writing region close event to WAL at 1731188380260Running coprocessor post-close hooks at 1731188380267 (+7 ms)Closed at 1731188380267 2024-11-09T21:39:40,269 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(157): Closed 6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:40,270 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=39 updating hbase:meta row=6b63665888c8e2240cbfb5b32190ca91, regionState=CLOSED 2024-11-09T21:39:40,270 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:40,271 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=963d0586451c5d763fb7c40b77e2f999, regionState=CLOSED 2024-11-09T21:39:40,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6b63665888c8e2240cbfb5b32190ca91, server=d86ff69d6719,45593,1731188365056 because future has completed 2024-11-09T21:39:40,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 963d0586451c5d763fb7c40b77e2f999, server=d86ff69d6719,45707,1731188364951 because future has completed 2024-11-09T21:39:40,275 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=41, resume processing ppid=39 2024-11-09T21:39:40,276 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; CloseRegionProcedure 6b63665888c8e2240cbfb5b32190ca91, server=d86ff69d6719,45593,1731188365056 in 173 msec 2024-11-09T21:39:40,277 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=40 2024-11-09T21:39:40,277 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=6b63665888c8e2240cbfb5b32190ca91, UNASSIGN in 179 msec 2024-11-09T21:39:40,277 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=40, state=SUCCESS, hasLock=false; 
CloseRegionProcedure 963d0586451c5d763fb7c40b77e2f999, server=d86ff69d6719,45707,1731188364951 in 173 msec 2024-11-09T21:39:40,279 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=38 2024-11-09T21:39:40,279 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=38, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=963d0586451c5d763fb7c40b77e2f999, UNASSIGN in 181 msec 2024-11-09T21:39:40,281 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=38, resume processing ppid=37 2024-11-09T21:39:40,281 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=37, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestNs:TestTable in 185 msec 2024-11-09T21:39:40,283 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731188380283"}]},"ts":"1731188380283"} 2024-11-09T21:39:40,285 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=DISABLED in hbase:meta 2024-11-09T21:39:40,285 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestNs:TestTable to state=DISABLED 2024-11-09T21:39:40,287 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestNs:TestTable in 203 msec 2024-11-09T21:39:40,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-09T21:39:40,349 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: TestNs:TestTable completed 2024-11-09T21:39:40,350 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestNs:TestTable 2024-11-09T21:39:40,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=43, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestNs:TestTable 2024-11-09T21:39:40,353 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=43, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-09T21:39:40,355 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=43, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-09T21:39:40,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=43 2024-11-09T21:39:40,360 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:40,360 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:40,363 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/6b63665888c8e2240cbfb5b32190ca91/cf, FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/6b63665888c8e2240cbfb5b32190ca91/recovered.edits] 2024-11-09T21:39:40,363 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/963d0586451c5d763fb7c40b77e2f999/cf, FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/963d0586451c5d763fb7c40b77e2f999/recovered.edits] 2024-11-09T21:39:40,372 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/963d0586451c5d763fb7c40b77e2f999/recovered.edits/4.seqid to hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/archive/data/TestNs/TestTable/963d0586451c5d763fb7c40b77e2f999/recovered.edits/4.seqid 2024-11-09T21:39:40,372 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/6b63665888c8e2240cbfb5b32190ca91/recovered.edits/4.seqid to hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/archive/data/TestNs/TestTable/6b63665888c8e2240cbfb5b32190ca91/recovered.edits/4.seqid 2024-11-09T21:39:40,373 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/963d0586451c5d763fb7c40b77e2f999 2024-11-09T21:39:40,373 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/TestNs/TestTable/6b63665888c8e2240cbfb5b32190ca91 2024-11-09T21:39:40,373 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestNs:TestTable regions 2024-11-09T21:39:40,377 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=43, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-09T21:39:40,381 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of TestNs:TestTable from hbase:meta 2024-11-09T21:39:40,385 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestNs:TestTable' descriptor. 2024-11-09T21:39:40,388 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=43, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-09T21:39:40,388 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestNs:TestTable' from region states. 
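Once its last table is gone, the TestNs namespace itself can be dropped, which is what the DeleteNamespaceProcedure (pid=44) a little further down does. A minimal client-side sketch follows; the guard with listTableNamesByNamespace is illustrative, reflecting that the master rejects deleteNamespace while the namespace still contains tables.

```java
// Sketch: remove the TestNs namespace after its tables are deleted. deleteNamespace is
// rejected while the namespace still contains tables, hence the emptiness check.
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class DropNamespace {
  public static void dropIfEmpty(Admin admin, String namespace) throws IOException {
    TableName[] remaining = admin.listTableNamesByNamespace(namespace);
    if (remaining.length == 0) {
      admin.deleteNamespace(namespace); // master runs a DeleteNamespaceProcedure (pid=44 above)
    }
  }
}
```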
2024-11-09T21:39:40,388 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731188380388"}]},"ts":"9223372036854775807"} 2024-11-09T21:39:40,388 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731188380388"}]},"ts":"9223372036854775807"} 2024-11-09T21:39:40,392 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-09T21:39:40,392 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 6b63665888c8e2240cbfb5b32190ca91, NAME => 'TestNs:TestTable,,1731188370631.6b63665888c8e2240cbfb5b32190ca91.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 963d0586451c5d763fb7c40b77e2f999, NAME => 'TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999.', STARTKEY => '1', ENDKEY => ''}] 2024-11-09T21:39:40,392 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestNs:TestTable' as deleted. 2024-11-09T21:39:40,393 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731188380392"}]},"ts":"9223372036854775807"} 2024-11-09T21:39:40,396 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table TestNs:TestTable state from META 2024-11-09T21:39:40,398 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=43, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-09T21:39:40,400 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestNs:TestTable in 48 msec 2024-11-09T21:39:40,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=43 2024-11-09T21:39:40,620 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestNs:TestTable 2024-11-09T21:39:40,621 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: TestNs:TestTable completed 2024-11-09T21:39:40,626 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.HMaster$20(3601): Client=jenkins//172.17.0.2 delete TestNs 2024-11-09T21:39:40,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_NAMESPACE_PREPARE, hasLock=false; DeleteNamespaceProcedure, namespace=TestNs 2024-11-09T21:39:40,635 INFO [PEWorker-5 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_PREPARE, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-11-09T21:39:40,637 INFO [PEWorker-5 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_DELETE_FROM_NS_TABLE, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-11-09T21:39:40,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-09T21:39:40,640 INFO [PEWorker-5 {}] procedure.DeleteNamespaceProcedure(67): pid=44, 
state=RUNNABLE:DELETE_NAMESPACE_DELETE_DIRECTORIES, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-11-09T21:39:40,643 INFO [PEWorker-5 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-11-09T21:39:40,645 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteNamespaceProcedure, namespace=TestNs in 16 msec 2024-11-09T21:39:40,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-09T21:39:40,899 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$NamespaceProcedureBiConsumer(2745): Operation: DELETE_NAMESPACE, Namespace: TestNs completed 2024-11-09T21:39:40,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-09T21:39:40,900 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T21:39:40,900 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.tearDownAfterClass(TestClusterScopeQuotaThrottle.java:107) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T21:39:40,906 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T21:39:40,907 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T21:39:40,907 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-09T21:39:40,907 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-09T21:39:40,907 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1219616701, stopped=false 2024-11-09T21:39:40,907 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.quotas.MasterQuotasObserver 2024-11-09T21:39:40,908 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=d86ff69d6719,32969,1731188364211 2024-11-09T21:39:40,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T21:39:40,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T21:39:40,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T21:39:40,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:40,931 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T21:39:40,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:40,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:40,932 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-09T21:39:40,932 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T21:39:40,932 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.tearDownAfterClass(TestClusterScopeQuotaThrottle.java:107) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T21:39:40,932 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T21:39:40,932 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T21:39:40,932 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T21:39:40,933 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'd86ff69d6719,45707,1731188364951' ***** 2024-11-09T21:39:40,933 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T21:39:40,933 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'd86ff69d6719,45593,1731188365056' ***** 2024-11-09T21:39:40,933 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T21:39:40,933 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T21:39:40,934 INFO [RS:1;d86ff69d6719:45593 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T21:39:40,934 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T21:39:40,934 INFO [RS:1;d86ff69d6719:45593 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T21:39:40,934 INFO [RS:0;d86ff69d6719:45707 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T21:39:40,934 INFO [RS:0;d86ff69d6719:45707 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T21:39:40,934 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(3091): Received CLOSE for db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:40,934 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T21:39:40,934 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(959): stopping server d86ff69d6719,45707,1731188364951 2024-11-09T21:39:40,934 INFO [RS:0;d86ff69d6719:45707 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T21:39:40,934 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T21:39:40,934 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(959): stopping server d86ff69d6719,45593,1731188365056 2024-11-09T21:39:40,934 INFO [RS:1;d86ff69d6719:45593 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T21:39:40,934 INFO [RS:0;d86ff69d6719:45707 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;d86ff69d6719:45707. 2024-11-09T21:39:40,934 INFO [RS:1;d86ff69d6719:45593 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;d86ff69d6719:45593. 
2024-11-09T21:39:40,935 DEBUG [RS:0;d86ff69d6719:45707 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T21:39:40,935 DEBUG [RS:1;d86ff69d6719:45593 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T21:39:40,935 DEBUG [RS:0;d86ff69d6719:45707 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T21:39:40,935 DEBUG [RS:1;d86ff69d6719:45593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T21:39:40,935 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing db7fd7a0f2c1cff1a9776ed1fa466bae, disabling compactions & flushes 2024-11-09T21:39:40,935 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-09T21:39:40,935 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-09T21:39:40,935 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 2024-11-09T21:39:40,935 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T21:39:40,935 DEBUG [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(1325): Online Regions={db7fd7a0f2c1cff1a9776ed1fa466bae=hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae.} 2024-11-09T21:39:40,935 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 2024-11-09T21:39:40,935 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T21:39:40,935 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. after waiting 0 ms 2024-11-09T21:39:40,935 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-09T21:39:40,935 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 2024-11-09T21:39:40,935 DEBUG [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(1351): Waiting on db7fd7a0f2c1cff1a9776ed1fa466bae 2024-11-09T21:39:40,935 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing db7fd7a0f2c1cff1a9776ed1fa466bae 2/2 column families, dataSize=726 B heapSize=2.44 KB 2024-11-09T21:39:40,936 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-09T21:39:40,936 DEBUG [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-09T21:39:40,936 DEBUG [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-09T21:39:40,936 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T21:39:40,936 INFO [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T21:39:40,936 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T21:39:40,937 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T21:39:40,937 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T21:39:40,937 INFO [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=13.33 KB heapSize=24.55 KB 2024-11-09T21:39:40,953 INFO [regionserver/d86ff69d6719:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 
2024-11-09T21:39:40,953 INFO [regionserver/d86ff69d6719:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T21:39:40,962 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae/.tmp/q/6d48cd530c2d4e36ad5ffc54330e6025 is 44, key is u.jenkins/q:s.default:/1731188373671/DeleteColumn/seqid=0 2024-11-09T21:39:40,966 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/.tmp/info/63bbf5f6add0412e8f655a253d1218d3 is 135, key is hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae./info:regioninfo/1731188367801/Put/seqid=0 2024-11-09T21:39:40,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741848_1024 (size=5302) 2024-11-09T21:39:40,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741848_1024 (size=5302) 2024-11-09T21:39:40,972 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=597 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae/.tmp/q/6d48cd530c2d4e36ad5ffc54330e6025 2024-11-09T21:39:40,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741849_1025 (size=7362) 2024-11-09T21:39:40,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741849_1025 (size=7362) 2024-11-09T21:39:40,982 INFO [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.80 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/.tmp/info/63bbf5f6add0412e8f655a253d1218d3 2024-11-09T21:39:40,986 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6d48cd530c2d4e36ad5ffc54330e6025 2024-11-09T21:39:41,005 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae/.tmp/u/f55b68632da94523bb3f56390bd88a0a is 43, key is t.TestNs:TestTable/u:/1731188376346/DeleteFamily/seqid=0 2024-11-09T21:39:41,014 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/.tmp/ns/326350a6bc234913aab040ba79943c11 is 92, key is TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999./ns:/1731188380377/DeleteFamily/seqid=0 2024-11-09T21:39:41,037 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741850_1026 (size=5250) 2024-11-09T21:39:41,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741850_1026 (size=5250) 2024-11-09T21:39:41,039 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=129 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae/.tmp/u/f55b68632da94523bb3f56390bd88a0a 2024-11-09T21:39:41,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741851_1027 (size=5710) 2024-11-09T21:39:41,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741851_1027 (size=5710) 2024-11-09T21:39:41,050 INFO [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=572 B at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/.tmp/ns/326350a6bc234913aab040ba79943c11 2024-11-09T21:39:41,052 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f55b68632da94523bb3f56390bd88a0a 2024-11-09T21:39:41,054 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae/.tmp/q/6d48cd530c2d4e36ad5ffc54330e6025 as hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae/q/6d48cd530c2d4e36ad5ffc54330e6025 2024-11-09T21:39:41,069 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6d48cd530c2d4e36ad5ffc54330e6025 2024-11-09T21:39:41,069 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae/q/6d48cd530c2d4e36ad5ffc54330e6025, entries=5, sequenceid=17, filesize=5.2 K 2024-11-09T21:39:41,071 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae/.tmp/u/f55b68632da94523bb3f56390bd88a0a as hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae/u/f55b68632da94523bb3f56390bd88a0a 2024-11-09T21:39:41,082 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f55b68632da94523bb3f56390bd88a0a 2024-11-09T21:39:41,082 INFO 
[RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae/u/f55b68632da94523bb3f56390bd88a0a, entries=4, sequenceid=17, filesize=5.1 K 2024-11-09T21:39:41,084 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~726 B/726, heapSize ~2.41 KB/2464, currentSize=0 B/0 for db7fd7a0f2c1cff1a9776ed1fa466bae in 148ms, sequenceid=17, compaction requested=false 2024-11-09T21:39:41,086 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/.tmp/rep_barrier/7af39f1d6528442d8689af1017e3282c is 101, key is TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999./rep_barrier:/1731188380377/DeleteFamily/seqid=0 2024-11-09T21:39:41,091 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/quota/db7fd7a0f2c1cff1a9776ed1fa466bae/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=1 2024-11-09T21:39:41,091 INFO [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 2024-11-09T21:39:41,092 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for db7fd7a0f2c1cff1a9776ed1fa466bae: Waiting for close lock at 1731188380934Running coprocessor pre-close hooks at 1731188380935 (+1 ms)Disabling compacts and flushes for region at 1731188380935Disabling writes for close at 1731188380935Obtaining lock to block concurrent updates at 1731188380935Preparing flush snapshotting stores in db7fd7a0f2c1cff1a9776ed1fa466bae at 1731188380935Finished memstore snapshotting hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae., syncing WAL and waiting on mvcc, flushsize=dataSize=726, getHeapSize=2464, getOffHeapSize=0, getCellsCount=17 at 1731188380936 (+1 ms)Flushing stores of hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 
at 1731188380937 (+1 ms)Flushing db7fd7a0f2c1cff1a9776ed1fa466bae/q: creating writer at 1731188380937Flushing db7fd7a0f2c1cff1a9776ed1fa466bae/q: appending metadata at 1731188380956 (+19 ms)Flushing db7fd7a0f2c1cff1a9776ed1fa466bae/q: closing flushed file at 1731188380956Flushing db7fd7a0f2c1cff1a9776ed1fa466bae/u: creating writer at 1731188380987 (+31 ms)Flushing db7fd7a0f2c1cff1a9776ed1fa466bae/u: appending metadata at 1731188381003 (+16 ms)Flushing db7fd7a0f2c1cff1a9776ed1fa466bae/u: closing flushed file at 1731188381003Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56fa42e4: reopening flushed file at 1731188381053 (+50 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@607fbcbd: reopening flushed file at 1731188381069 (+16 ms)Finished flush of dataSize ~726 B/726, heapSize ~2.41 KB/2464, currentSize=0 B/0 for db7fd7a0f2c1cff1a9776ed1fa466bae in 148ms, sequenceid=17, compaction requested=false at 1731188381084 (+15 ms)Writing region close event to WAL at 1731188381085 (+1 ms)Running coprocessor post-close hooks at 1731188381091 (+6 ms)Closed at 1731188381091 2024-11-09T21:39:41,092 DEBUG [RS_CLOSE_REGION-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:quota,,1731188367265.db7fd7a0f2c1cff1a9776ed1fa466bae. 2024-11-09T21:39:41,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741852_1028 (size=5823) 2024-11-09T21:39:41,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741852_1028 (size=5823) 2024-11-09T21:39:41,093 INFO [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=515 B at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/.tmp/rep_barrier/7af39f1d6528442d8689af1017e3282c 2024-11-09T21:39:41,117 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/.tmp/table/f4e65f091c8b497eb217400a799d3551 is 95, key is TestNs:TestTable,1,1731188370631.963d0586451c5d763fb7c40b77e2f999./table:/1731188380377/DeleteFamily/seqid=0 2024-11-09T21:39:41,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741853_1029 (size=5966) 2024-11-09T21:39:41,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741853_1029 (size=5966) 2024-11-09T21:39:41,123 INFO [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.47 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/.tmp/table/f4e65f091c8b497eb217400a799d3551 2024-11-09T21:39:41,133 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/.tmp/info/63bbf5f6add0412e8f655a253d1218d3 as 
hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/info/63bbf5f6add0412e8f655a253d1218d3 2024-11-09T21:39:41,136 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(976): stopping server d86ff69d6719,45593,1731188365056; all regions closed. 2024-11-09T21:39:41,136 DEBUG [RS:1;d86ff69d6719:45593 {}] quotas.QuotaCache(112): Stopping QuotaRefresherChore chore. 2024-11-09T21:39:41,136 DEBUG [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-09T21:39:41,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741834_1010 (size=3917) 2024-11-09T21:39:41,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741834_1010 (size=3917) 2024-11-09T21:39:41,146 INFO [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/info/63bbf5f6add0412e8f655a253d1218d3, entries=21, sequenceid=65, filesize=7.2 K 2024-11-09T21:39:41,146 DEBUG [RS:1;d86ff69d6719:45593 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/oldWALs 2024-11-09T21:39:41,147 INFO [RS:1;d86ff69d6719:45593 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL d86ff69d6719%2C45593%2C1731188365056:(num 1731188366606) 2024-11-09T21:39:41,147 DEBUG [RS:1;d86ff69d6719:45593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T21:39:41,147 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T21:39:41,147 INFO [RS:1;d86ff69d6719:45593 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T21:39:41,147 INFO [RS:1;d86ff69d6719:45593 {}] hbase.ChoreService(370): Chore service for: regionserver/d86ff69d6719:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T21:39:41,148 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T21:39:41,148 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T21:39:41,148 INFO [regionserver/d86ff69d6719:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T21:39:41,148 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-09T21:39:41,148 INFO [RS:1;d86ff69d6719:45593 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T21:39:41,148 INFO [RS:1;d86ff69d6719:45593 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45593 2024-11-09T21:39:41,151 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/.tmp/ns/326350a6bc234913aab040ba79943c11 as hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/ns/326350a6bc234913aab040ba79943c11 2024-11-09T21:39:41,162 INFO [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/ns/326350a6bc234913aab040ba79943c11, entries=8, sequenceid=65, filesize=5.6 K 2024-11-09T21:39:41,163 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/.tmp/rep_barrier/7af39f1d6528442d8689af1017e3282c as hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/rep_barrier/7af39f1d6528442d8689af1017e3282c 2024-11-09T21:39:41,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d86ff69d6719,45593,1731188365056 2024-11-09T21:39:41,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T21:39:41,172 INFO [RS:1;d86ff69d6719:45593 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T21:39:41,173 INFO [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/rep_barrier/7af39f1d6528442d8689af1017e3282c, entries=6, sequenceid=65, filesize=5.7 K 2024-11-09T21:39:41,174 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/.tmp/table/f4e65f091c8b497eb217400a799d3551 as hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/table/f4e65f091c8b497eb217400a799d3551 2024-11-09T21:39:41,184 INFO [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/table/f4e65f091c8b497eb217400a799d3551, entries=12, sequenceid=65, filesize=5.8 K 2024-11-09T21:39:41,185 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d86ff69d6719,45593,1731188365056] 2024-11-09T21:39:41,186 INFO [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~13.33 KB/13653, heapSize ~24.48 KB/25072, currentSize=0 B/0 for 1588230740 in 248ms, sequenceid=65, compaction requested=false 2024-11-09T21:39:41,192 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/data/hbase/meta/1588230740/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-11-09T21:39:41,192 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d86ff69d6719,45593,1731188365056 already deleted, retry=false 2024-11-09T21:39:41,193 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T21:39:41,193 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d86ff69d6719,45593,1731188365056 expired; onlineServers=1 2024-11-09T21:39:41,193 INFO [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T21:39:41,193 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731188380936Running coprocessor pre-close hooks at 1731188380936Disabling compacts and flushes for region at 1731188380936Disabling writes for close at 1731188380937 (+1 ms)Obtaining lock to block concurrent updates at 1731188380937Preparing flush snapshotting stores in 1588230740 at 1731188380937Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=13653, getHeapSize=25072, getOffHeapSize=0, getCellsCount=139 at 1731188380937Flushing stores of hbase:meta,,1.1588230740 at 1731188380938 (+1 ms)Flushing 1588230740/info: creating writer at 1731188380938Flushing 1588230740/info: appending metadata at 1731188380964 (+26 ms)Flushing 1588230740/info: closing flushed file at 1731188380965 (+1 ms)Flushing 1588230740/ns: creating writer at 1731188380994 (+29 ms)Flushing 1588230740/ns: appending metadata at 1731188381013 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731188381013Flushing 1588230740/rep_barrier: creating writer at 1731188381061 (+48 ms)Flushing 1588230740/rep_barrier: appending metadata at 1731188381085 (+24 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1731188381085Flushing 1588230740/table: creating writer at 1731188381101 (+16 ms)Flushing 1588230740/table: appending metadata at 1731188381116 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731188381116Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d06a7a5: reopening flushed file at 1731188381132 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b1bb2b5: reopening flushed file at 1731188381147 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@518a7ada: reopening flushed file at 1731188381162 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cc5ed87: reopening flushed file at 1731188381173 (+11 ms)Finished flush of dataSize ~13.33 KB/13653, heapSize ~24.48 KB/25072, currentSize=0 B/0 for 1588230740 in 248ms, sequenceid=65, compaction requested=false at 1731188381186 (+13 ms)Writing region close event to WAL at 1731188381187 (+1 
ms)Running coprocessor post-close hooks at 1731188381192 (+5 ms)Closed at 1731188381193 (+1 ms) 2024-11-09T21:39:41,193 DEBUG [RS_CLOSE_META-regionserver/d86ff69d6719:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-09T21:39:41,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T21:39:41,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45593-0x10122d3a32c0002, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T21:39:41,285 INFO [RS:1;d86ff69d6719:45593 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T21:39:41,286 INFO [RS:1;d86ff69d6719:45593 {}] regionserver.HRegionServer(1031): Exiting; stopping=d86ff69d6719,45593,1731188365056; zookeeper connection closed. 2024-11-09T21:39:41,286 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5c19b0c9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5c19b0c9 2024-11-09T21:39:41,337 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(976): stopping server d86ff69d6719,45707,1731188364951; all regions closed. 2024-11-09T21:39:41,337 DEBUG [RS:0;d86ff69d6719:45707 {}] quotas.QuotaCache(112): Stopping QuotaRefresherChore chore. 2024-11-09T21:39:41,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741835_1011 (size=17505) 2024-11-09T21:39:41,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741835_1011 (size=17505) 2024-11-09T21:39:41,344 DEBUG [RS:0;d86ff69d6719:45707 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/oldWALs 2024-11-09T21:39:41,344 INFO [RS:0;d86ff69d6719:45707 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL d86ff69d6719%2C45707%2C1731188364951.meta:.meta(num 1731188366992) 2024-11-09T21:39:41,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741833_1009 (size=4065) 2024-11-09T21:39:41,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741833_1009 (size=4065) 2024-11-09T21:39:41,350 DEBUG [RS:0;d86ff69d6719:45707 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/oldWALs 2024-11-09T21:39:41,350 INFO [RS:0;d86ff69d6719:45707 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL d86ff69d6719%2C45707%2C1731188364951:(num 1731188366606) 2024-11-09T21:39:41,350 DEBUG [RS:0;d86ff69d6719:45707 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T21:39:41,350 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T21:39:41,350 INFO [RS:0;d86ff69d6719:45707 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T21:39:41,350 INFO [RS:0;d86ff69d6719:45707 {}] hbase.ChoreService(370): Chore service for: regionserver/d86ff69d6719:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, 
ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T21:39:41,350 INFO [RS:0;d86ff69d6719:45707 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T21:39:41,351 INFO [regionserver/d86ff69d6719:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T21:39:41,351 INFO [RS:0;d86ff69d6719:45707 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45707 2024-11-09T21:39:41,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/d86ff69d6719,45707,1731188364951 2024-11-09T21:39:41,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T21:39:41,368 INFO [RS:0;d86ff69d6719:45707 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T21:39:41,376 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [d86ff69d6719,45707,1731188364951] 2024-11-09T21:39:41,384 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/d86ff69d6719,45707,1731188364951 already deleted, retry=false 2024-11-09T21:39:41,384 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; d86ff69d6719,45707,1731188364951 expired; onlineServers=0 2024-11-09T21:39:41,384 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'd86ff69d6719,32969,1731188364211' ***** 2024-11-09T21:39:41,385 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-09T21:39:41,385 INFO [M:0;d86ff69d6719:32969 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T21:39:41,385 INFO [M:0;d86ff69d6719:32969 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T21:39:41,385 DEBUG [M:0;d86ff69d6719:32969 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-09T21:39:41,385 DEBUG [M:0;d86ff69d6719:32969 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-09T21:39:41,385 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-09T21:39:41,385 DEBUG [master/d86ff69d6719:0:becomeActiveMaster-HFileCleaner.large.0-1731188366208 {}] cleaner.HFileCleaner(306): Exit Thread[master/d86ff69d6719:0:becomeActiveMaster-HFileCleaner.large.0-1731188366208,5,FailOnTimeoutGroup] 2024-11-09T21:39:41,385 DEBUG [master/d86ff69d6719:0:becomeActiveMaster-HFileCleaner.small.0-1731188366210 {}] cleaner.HFileCleaner(306): Exit Thread[master/d86ff69d6719:0:becomeActiveMaster-HFileCleaner.small.0-1731188366210,5,FailOnTimeoutGroup] 2024-11-09T21:39:41,385 INFO [M:0;d86ff69d6719:32969 {}] hbase.ChoreService(370): Chore service for: master/d86ff69d6719:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS, ScheduledChore name=QuotaObserverChore, period=60000, unit=MILLISECONDS] on shutdown 2024-11-09T21:39:41,386 INFO [M:0;d86ff69d6719:32969 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T21:39:41,386 DEBUG [M:0;d86ff69d6719:32969 {}] master.HMaster(1795): Stopping service threads 2024-11-09T21:39:41,386 INFO [M:0;d86ff69d6719:32969 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-09T21:39:41,386 INFO [M:0;d86ff69d6719:32969 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T21:39:41,386 ERROR [M:0;d86ff69d6719:32969 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-1,5,PEWorkerGroup] Thread[HFileArchiver-2,5,PEWorkerGroup] Thread[HFileArchiver-3,5,PEWorkerGroup] Thread[HFileArchiver-4,5,PEWorkerGroup] Thread[HFileArchiver-5,5,PEWorkerGroup] 2024-11-09T21:39:41,386 INFO [M:0;d86ff69d6719:32969 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-09T21:39:41,387 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-09T21:39:41,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-09T21:39:41,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T21:39:41,393 DEBUG [M:0;d86ff69d6719:32969 {}] zookeeper.ZKUtil(347): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-09T21:39:41,393 WARN [M:0;d86ff69d6719:32969 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-09T21:39:41,394 INFO [M:0;d86ff69d6719:32969 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/.lastflushedseqids 2024-11-09T21:39:41,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741854_1030 (size=134) 2024-11-09T21:39:41,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741854_1030 (size=134) 2024-11-09T21:39:41,406 INFO [M:0;d86ff69d6719:32969 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-09T21:39:41,406 INFO [M:0;d86ff69d6719:32969 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-09T21:39:41,406 DEBUG [M:0;d86ff69d6719:32969 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T21:39:41,406 INFO [M:0;d86ff69d6719:32969 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T21:39:41,406 DEBUG [M:0;d86ff69d6719:32969 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T21:39:41,406 DEBUG [M:0;d86ff69d6719:32969 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T21:39:41,406 DEBUG [M:0;d86ff69d6719:32969 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T21:39:41,406 INFO [M:0;d86ff69d6719:32969 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=157.54 KB heapSize=190.75 KB 2024-11-09T21:39:41,424 DEBUG [M:0;d86ff69d6719:32969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7b5f23a4c3e148689b33a47c53149b2b is 82, key is hbase:meta,,1/info:regioninfo/1731188367076/Put/seqid=0 2024-11-09T21:39:41,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741855_1031 (size=5672) 2024-11-09T21:39:41,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741855_1031 (size=5672) 2024-11-09T21:39:41,431 INFO [M:0;d86ff69d6719:32969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7b5f23a4c3e148689b33a47c53149b2b 2024-11-09T21:39:41,466 DEBUG [M:0;d86ff69d6719:32969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/521b315e3d03409e9a3799138bdc26ea is 958, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731188367830/Put/seqid=0 2024-11-09T21:39:41,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741856_1032 (size=13416) 2024-11-09T21:39:41,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741856_1032 (size=13416) 2024-11-09T21:39:41,473 INFO [M:0;d86ff69d6719:32969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=156.92 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/521b315e3d03409e9a3799138bdc26ea 2024-11-09T21:39:41,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T21:39:41,476 INFO [RS:0;d86ff69d6719:45707 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T21:39:41,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45707-0x10122d3a32c0001, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T21:39:41,476 INFO [RS:0;d86ff69d6719:45707 {}] regionserver.HRegionServer(1031): Exiting; stopping=d86ff69d6719,45707,1731188364951; zookeeper connection closed. 
2024-11-09T21:39:41,476 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7fb3faf4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7fb3faf4
2024-11-09T21:39:41,477 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete
2024-11-09T21:39:41,498 DEBUG [M:0;d86ff69d6719:32969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8276075d0c0b455ba9e7fa2b58b51634 is 69, key is d86ff69d6719,45593,1731188365056/rs:state/1731188366320/Put/seqid=0
2024-11-09T21:39:41,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741857_1033 (size=5224)
2024-11-09T21:39:41,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741857_1033 (size=5224)
2024-11-09T21:39:41,512 INFO [M:0;d86ff69d6719:32969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8276075d0c0b455ba9e7fa2b58b51634
2024-11-09T21:39:41,522 DEBUG [M:0;d86ff69d6719:32969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7b5f23a4c3e148689b33a47c53149b2b as hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7b5f23a4c3e148689b33a47c53149b2b
2024-11-09T21:39:41,531 INFO [M:0;d86ff69d6719:32969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7b5f23a4c3e148689b33a47c53149b2b, entries=8, sequenceid=375, filesize=5.5 K
2024-11-09T21:39:41,533 DEBUG [M:0;d86ff69d6719:32969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/521b315e3d03409e9a3799138bdc26ea as hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/521b315e3d03409e9a3799138bdc26ea
2024-11-09T21:39:41,540 INFO [M:0;d86ff69d6719:32969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/521b315e3d03409e9a3799138bdc26ea, entries=44, sequenceid=375, filesize=13.1 K
2024-11-09T21:39:41,541 DEBUG [M:0;d86ff69d6719:32969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8276075d0c0b455ba9e7fa2b58b51634 as hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8276075d0c0b455ba9e7fa2b58b51634
2024-11-09T21:39:41,549 INFO [M:0;d86ff69d6719:32969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36387/user/jenkins/test-data/c4351669-009a-b813-346a-f5095b14fa0b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8276075d0c0b455ba9e7fa2b58b51634, entries=2, sequenceid=375, filesize=5.1 K
2024-11-09T21:39:41,551 INFO [M:0;d86ff69d6719:32969 {}] regionserver.HRegion(3140): Finished flush of dataSize ~157.54 KB/161325, heapSize ~190.45 KB/195024, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=375, compaction requested=false
2024-11-09T21:39:41,552 INFO [M:0;d86ff69d6719:32969 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-09T21:39:41,552 DEBUG [M:0;d86ff69d6719:32969 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731188381406Disabling compacts and flushes for region at 1731188381406Disabling writes for close at 1731188381406Obtaining lock to block concurrent updates at 1731188381406Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731188381406Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=161325, getHeapSize=195264, getOffHeapSize=0, getCellsCount=434 at 1731188381407 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731188381407Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731188381407Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731188381424 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731188381424Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731188381439 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731188381465 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731188381465Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731188381480 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731188381497 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731188381497Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@257f6913: reopening flushed file at 1731188381521 (+24 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f57d755: reopening flushed file at 1731188381531 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f40db3: reopening flushed file at 1731188381540 (+9 ms)Finished flush of dataSize ~157.54 KB/161325, heapSize ~190.45 KB/195024, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=375, compaction requested=false at 1731188381551 (+11 ms)Writing region close event to WAL at 1731188381552 (+1 ms)Closed at 1731188381552
2024-11-09T21:39:41,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41017 is added to blk_1073741830_1006 (size=186355)
2024-11-09T21:39:41,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45239 is added to blk_1073741830_1006 (size=186355)
2024-11-09T21:39:41,558 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-09T21:39:41,558 INFO [M:0;d86ff69d6719:32969 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-09T21:39:41,558 INFO [M:0;d86ff69d6719:32969 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32969
2024-11-09T21:39:41,558 INFO [M:0;d86ff69d6719:32969 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-09T21:39:41,684 INFO [M:0;d86ff69d6719:32969 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-09T21:39:41,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-09T21:39:41,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32969-0x10122d3a32c0000, quorum=127.0.0.1:59566, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-09T21:39:41,732 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2cddf455{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-09T21:39:41,735 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f938ea2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-09T21:39:41,735 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-09T21:39:41,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46797e8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-09T21:39:41,736 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@267ab2dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/hadoop.log.dir/,STOPPED}
2024-11-09T21:39:41,738 WARN [BP-1298723039-172.17.0.2-1731188360083 heartbeating to localhost/127.0.0.1:36387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-09T21:39:41,739 WARN [BP-1298723039-172.17.0.2-1731188360083 heartbeating to localhost/127.0.0.1:36387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1298723039-172.17.0.2-1731188360083 (Datanode Uuid 73d4c1be-99df-4545-9076-1ee81d27db8a) service to localhost/127.0.0.1:36387
2024-11-09T21:39:41,740 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/cluster_719ad75b-0962-1c01-5841-5a2981114368/data/data3/current/BP-1298723039-172.17.0.2-1731188360083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T21:39:41,740 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/cluster_719ad75b-0962-1c01-5841-5a2981114368/data/data4/current/BP-1298723039-172.17.0.2-1731188360083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T21:39:41,740 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-09T21:39:41,740 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-09T21:39:41,741 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-09T21:39:41,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c7e6768{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-09T21:39:41,751 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2c8e8509{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-09T21:39:41,751 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-09T21:39:41,752 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3768391{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-09T21:39:41,752 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@180f6c17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/hadoop.log.dir/,STOPPED}
2024-11-09T21:39:41,753 WARN [BP-1298723039-172.17.0.2-1731188360083 heartbeating to localhost/127.0.0.1:36387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-09T21:39:41,753 WARN [BP-1298723039-172.17.0.2-1731188360083 heartbeating to localhost/127.0.0.1:36387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1298723039-172.17.0.2-1731188360083 (Datanode Uuid 9b4afbb2-eba7-480a-8aee-bbc469ac45d1) service to localhost/127.0.0.1:36387
2024-11-09T21:39:41,754 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/cluster_719ad75b-0962-1c01-5841-5a2981114368/data/data1/current/BP-1298723039-172.17.0.2-1731188360083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T21:39:41,754 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-09T21:39:41,754 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-09T21:39:41,754 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/cluster_719ad75b-0962-1c01-5841-5a2981114368/data/data2/current/BP-1298723039-172.17.0.2-1731188360083 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T21:39:41,754 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-09T21:39:41,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70f1962d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-09T21:39:41,761 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c0994b2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-09T21:39:41,761 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-09T21:39:41,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@663b1cba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-09T21:39:41,762 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@73ed0cb3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/91f2bcce-8980-f42f-2f69-e9f1a8e401b3/hadoop.log.dir/,STOPPED}
2024-11-09T21:39:41,770 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-09T21:39:41,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down