2024-11-07 15:48:20,762 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-11-07 15:48:20,779 main DEBUG Took 0.014312 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-07 15:48:20,780 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-07 15:48:20,780 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-07 15:48:20,781 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-07 15:48:20,782 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 15:48:20,807 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-07 15:48:20,820 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,822 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 15:48:20,823 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,823 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 15:48:20,824 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,824 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 15:48:20,826 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,826 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 15:48:20,827 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,827 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 15:48:20,829 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,829 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 15:48:20,830 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,830 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-07 15:48:20,831 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,831 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 15:48:20,832 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,833 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 15:48:20,833 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,834 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 15:48:20,834 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,835 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 15:48:20,836 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,836 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 15:48:20,837 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,837 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-07 15:48:20,839 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 15:48:20,841 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-07 15:48:20,844 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-07 15:48:20,844 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-07 15:48:20,846 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-07 15:48:20,847 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-07 15:48:20,858 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-07 15:48:20,862 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-07 15:48:20,864 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-07 15:48:20,864 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-07 15:48:20,865 main DEBUG createAppenders(={Console}) 2024-11-07 15:48:20,866 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a initialized 2024-11-07 15:48:20,866 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-11-07 15:48:20,867 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a OK. 2024-11-07 15:48:20,868 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-07 15:48:20,868 main DEBUG OutputStream closed 2024-11-07 15:48:20,868 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-07 15:48:20,869 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-07 15:48:20,869 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@21e360a OK 2024-11-07 15:48:20,957 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-07 15:48:20,959 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-07 15:48:20,960 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-07 15:48:20,961 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-07 15:48:20,962 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-07 15:48:20,962 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-07 15:48:20,962 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-07 15:48:20,963 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-07 15:48:20,963 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-07 15:48:20,963 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-07 15:48:20,964 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-07 15:48:20,964 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-07 15:48:20,964 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-07 15:48:20,965 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-07 15:48:20,965 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-07 15:48:20,965 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-07 15:48:20,965 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-07 15:48:20,966 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-07 15:48:20,968 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-07 15:48:20,969 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@42b02722) with optional ClassLoader: null 2024-11-07 15:48:20,969 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-07 15:48:20,970 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@42b02722] started OK. 2024-11-07T15:48:20,983 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle timeout: 13 mins 2024-11-07 15:48:20,986 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-07 15:48:20,986 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-07T15:48:21,310 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd 2024-11-07T15:48:21,328 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=2, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-07T15:48:21,349 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/cluster_1448e0b2-db09-f2c0-aa15-452c85982d01, deleteOnExit=true 2024-11-07T15:48:21,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-07T15:48:21,351 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/test.cache.data in system properties and HBase conf 2024-11-07T15:48:21,352 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/hadoop.tmp.dir in system properties and HBase conf 2024-11-07T15:48:21,352 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/hadoop.log.dir in system properties and HBase conf 2024-11-07T15:48:21,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-07T15:48:21,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-07T15:48:21,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-07T15:48:21,488 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-07T15:48:21,622 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-07T15:48:21,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-07T15:48:21,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-07T15:48:21,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-07T15:48:21,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T15:48:21,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-07T15:48:21,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-07T15:48:21,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-07T15:48:21,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T15:48:21,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-07T15:48:21,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/nfs.dump.dir in system properties and HBase conf 2024-11-07T15:48:21,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/java.io.tmpdir in system properties and HBase conf 2024-11-07T15:48:21,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-07T15:48:21,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-07T15:48:21,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-07T15:48:22,623 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-07T15:48:22,737 INFO [Time-limited test {}] log.Log(170): Logging initialized @2905ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-07T15:48:22,852 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T15:48:22,947 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T15:48:22,990 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T15:48:22,990 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T15:48:22,992 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T15:48:23,009 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T15:48:23,012 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d649244{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/hadoop.log.dir/,AVAILABLE} 2024-11-07T15:48:23,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b1a9f49{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T15:48:23,266 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@300f56e1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/java.io.tmpdir/jetty-localhost-46745-hadoop-hdfs-3_4_1-tests_jar-_-any-7106850298086982235/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-07T15:48:23,283 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51edec43{HTTP/1.1, (http/1.1)}{localhost:46745} 2024-11-07T15:48:23,283 INFO [Time-limited test {}] server.Server(415): Started @3452ms 2024-11-07T15:48:23,779 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T15:48:23,789 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T15:48:23,793 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T15:48:23,793 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T15:48:23,793 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T15:48:23,795 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a3983b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/hadoop.log.dir/,AVAILABLE} 2024-11-07T15:48:23,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@167d5b17{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T15:48:23,962 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4756d35c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/java.io.tmpdir/jetty-localhost-44787-hadoop-hdfs-3_4_1-tests_jar-_-any-69471176160919718/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T15:48:23,963 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@42690001{HTTP/1.1, (http/1.1)}{localhost:44787} 2024-11-07T15:48:23,963 INFO [Time-limited test {}] server.Server(415): Started @4132ms 2024-11-07T15:48:24,028 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-07T15:48:24,173 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-07T15:48:24,178 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-07T15:48:24,180 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-07T15:48:24,180 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-07T15:48:24,181 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-07T15:48:24,181 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7416e56c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/hadoop.log.dir/,AVAILABLE} 2024-11-07T15:48:24,182 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ff9f4a9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-07T15:48:24,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@36a28386{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/java.io.tmpdir/jetty-localhost-46375-hadoop-hdfs-3_4_1-tests_jar-_-any-8267435919093196560/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-07T15:48:24,315 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ccb07ae{HTTP/1.1, (http/1.1)}{localhost:46375} 2024-11-07T15:48:24,316 INFO [Time-limited test {}] server.Server(415): Started @4485ms 2024-11-07T15:48:24,319 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-07T15:48:24,523 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/cluster_1448e0b2-db09-f2c0-aa15-452c85982d01/data/data1/current/BP-2051735518-172.17.0.2-1730994502303/current, will proceed with Du for space computation calculation, 2024-11-07T15:48:24,523 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/cluster_1448e0b2-db09-f2c0-aa15-452c85982d01/data/data3/current/BP-2051735518-172.17.0.2-1730994502303/current, will proceed with Du for space computation calculation, 2024-11-07T15:48:24,524 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/cluster_1448e0b2-db09-f2c0-aa15-452c85982d01/data/data2/current/BP-2051735518-172.17.0.2-1730994502303/current, will proceed with Du for space computation calculation, 2024-11-07T15:48:24,525 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/cluster_1448e0b2-db09-f2c0-aa15-452c85982d01/data/data4/current/BP-2051735518-172.17.0.2-1730994502303/current, will proceed with Du for space computation calculation, 2024-11-07T15:48:24,584 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-07T15:48:24,584 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-07T15:48:24,652 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc9ead9935c3d966 with lease ID 0x67b2184db1f462b1: Processing first storage report for DS-131cfc8f-5016-40bd-99cc-b52dd3fd14ba from datanode DatanodeRegistration(127.0.0.1:35461, datanodeUuid=2efb23b7-1301-4064-9c0a-e9338a092385, infoPort=39027, infoSecurePort=0, ipcPort=43669, storageInfo=lv=-57;cid=testClusterID;nsid=1235033047;c=1730994502303) 2024-11-07T15:48:24,653 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc9ead9935c3d966 with lease ID 0x67b2184db1f462b1: from storage DS-131cfc8f-5016-40bd-99cc-b52dd3fd14ba node DatanodeRegistration(127.0.0.1:35461, datanodeUuid=2efb23b7-1301-4064-9c0a-e9338a092385, infoPort=39027, infoSecurePort=0, ipcPort=43669, storageInfo=lv=-57;cid=testClusterID;nsid=1235033047;c=1730994502303), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-07T15:48:24,654 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc385961548478d21 with lease ID 0x67b2184db1f462b2: Processing first storage report for DS-9b29c662-46af-4f01-a960-db4bbd90c3b6 from datanode DatanodeRegistration(127.0.0.1:40735, datanodeUuid=09a05c23-7213-4c48-afb1-b798647ec89a, infoPort=32777, infoSecurePort=0, ipcPort=43371, storageInfo=lv=-57;cid=testClusterID;nsid=1235033047;c=1730994502303) 2024-11-07T15:48:24,654 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc385961548478d21 with lease ID 0x67b2184db1f462b2: from storage DS-9b29c662-46af-4f01-a960-db4bbd90c3b6 node DatanodeRegistration(127.0.0.1:40735, datanodeUuid=09a05c23-7213-4c48-afb1-b798647ec89a, infoPort=32777, infoSecurePort=0, ipcPort=43371, storageInfo=lv=-57;cid=testClusterID;nsid=1235033047;c=1730994502303), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T15:48:24,654 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc9ead9935c3d966 with lease ID 0x67b2184db1f462b1: Processing first storage report for DS-6996a082-b8ac-4ffc-b8dd-c0b7819086a1 from datanode DatanodeRegistration(127.0.0.1:35461, datanodeUuid=2efb23b7-1301-4064-9c0a-e9338a092385, infoPort=39027, infoSecurePort=0, ipcPort=43669, storageInfo=lv=-57;cid=testClusterID;nsid=1235033047;c=1730994502303) 2024-11-07T15:48:24,654 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc9ead9935c3d966 with lease ID 0x67b2184db1f462b1: from storage DS-6996a082-b8ac-4ffc-b8dd-c0b7819086a1 node DatanodeRegistration(127.0.0.1:35461, datanodeUuid=2efb23b7-1301-4064-9c0a-e9338a092385, infoPort=39027, infoSecurePort=0, ipcPort=43669, storageInfo=lv=-57;cid=testClusterID;nsid=1235033047;c=1730994502303), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T15:48:24,654 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc385961548478d21 with lease ID 0x67b2184db1f462b2: Processing first storage report for DS-a54c6544-798a-4047-9abd-e1456676fbc9 from datanode DatanodeRegistration(127.0.0.1:40735, datanodeUuid=09a05c23-7213-4c48-afb1-b798647ec89a, infoPort=32777, infoSecurePort=0, ipcPort=43371, storageInfo=lv=-57;cid=testClusterID;nsid=1235033047;c=1730994502303) 2024-11-07T15:48:24,655 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xc385961548478d21 with lease ID 0x67b2184db1f462b2: from storage DS-a54c6544-798a-4047-9abd-e1456676fbc9 node DatanodeRegistration(127.0.0.1:40735, datanodeUuid=09a05c23-7213-4c48-afb1-b798647ec89a, infoPort=32777, infoSecurePort=0, ipcPort=43371, storageInfo=lv=-57;cid=testClusterID;nsid=1235033047;c=1730994502303), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-07T15:48:24,748 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd 2024-11-07T15:48:24,836 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/cluster_1448e0b2-db09-f2c0-aa15-452c85982d01/zookeeper_0, clientPort=50813, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/cluster_1448e0b2-db09-f2c0-aa15-452c85982d01/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/cluster_1448e0b2-db09-f2c0-aa15-452c85982d01/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-07T15:48:24,847 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50813 2024-11-07T15:48:24,861 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T15:48:24,864 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T15:48:25,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741825_1001 (size=7) 2024-11-07T15:48:25,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741825_1001 (size=7) 2024-11-07T15:48:25,555 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61 with version=8 2024-11-07T15:48:25,555 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/hbase-staging 2024-11-07T15:48:25,659 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-07T15:48:25,967 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c2682f91c1e2:0 server-side Connection retries=45 2024-11-07T15:48:25,978 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T15:48:25,979 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T15:48:25,984 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T15:48:25,984 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T15:48:25,984 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T15:48:26,148 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-07T15:48:26,223 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-07T15:48:26,234 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-07T15:48:26,238 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T15:48:26,268 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 18740 (auto-detected) 2024-11-07T15:48:26,269 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-07T15:48:26,291 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38893 2024-11-07T15:48:26,318 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38893 connecting to ZooKeeper ensemble=127.0.0.1:50813 2024-11-07T15:48:26,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:388930x0, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T15:48:26,357 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38893-0x101836b79d20000 connected 2024-11-07T15:48:26,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T15:48:26,388 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T15:48:26,398 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T15:48:26,402 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61, hbase.cluster.distributed=false 2024-11-07T15:48:26,426 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T15:48:26,430 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38893 
2024-11-07T15:48:26,431 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38893 2024-11-07T15:48:26,431 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38893 2024-11-07T15:48:26,433 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38893 2024-11-07T15:48:26,434 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38893 2024-11-07T15:48:26,566 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c2682f91c1e2:0 server-side Connection retries=45 2024-11-07T15:48:26,568 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T15:48:26,568 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T15:48:26,569 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T15:48:26,569 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T15:48:26,569 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T15:48:26,572 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T15:48:26,575 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T15:48:26,576 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34001 2024-11-07T15:48:26,578 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34001 connecting to ZooKeeper ensemble=127.0.0.1:50813 2024-11-07T15:48:26,579 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T15:48:26,585 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T15:48:26,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:340010x0, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T15:48:26,596 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T15:48:26,596 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:34001-0x101836b79d20001 connected 2024-11-07T15:48:26,601 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T15:48:26,609 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T15:48:26,611 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T15:48:26,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T15:48:26,617 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34001 2024-11-07T15:48:26,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34001 2024-11-07T15:48:26,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34001 2024-11-07T15:48:26,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34001 2024-11-07T15:48:26,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34001 2024-11-07T15:48:26,637 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c2682f91c1e2:0 server-side Connection retries=45 2024-11-07T15:48:26,637 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T15:48:26,637 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-07T15:48:26,638 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-07T15:48:26,638 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-07T15:48:26,638 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-07T15:48:26,638 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-07T15:48:26,639 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-07T15:48:26,640 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37185 2024-11-07T15:48:26,641 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37185 connecting to ZooKeeper ensemble=127.0.0.1:50813 2024-11-07T15:48:26,642 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T15:48:26,645 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T15:48:26,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:371850x0, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-07T15:48:26,654 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:371850x0, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T15:48:26,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37185-0x101836b79d20002 connected 2024-11-07T15:48:26,655 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-07T15:48:26,657 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-07T15:48:26,658 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-07T15:48:26,660 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-07T15:48:26,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37185 2024-11-07T15:48:26,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37185 2024-11-07T15:48:26,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37185 2024-11-07T15:48:26,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37185 2024-11-07T15:48:26,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37185 2024-11-07T15:48:26,684 DEBUG [M:0;c2682f91c1e2:38893 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c2682f91c1e2:38893 2024-11-07T15:48:26,685 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c2682f91c1e2,38893,1730994505717 2024-11-07T15:48:26,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T15:48:26,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T15:48:26,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T15:48:26,694 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c2682f91c1e2,38893,1730994505717 2024-11-07T15:48:26,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T15:48:26,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T15:48:26,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-07T15:48:26,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T15:48:26,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T15:48:26,726 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-07T15:48:26,730 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c2682f91c1e2,38893,1730994505717 from backup master directory 2024-11-07T15:48:26,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c2682f91c1e2,38893,1730994505717 2024-11-07T15:48:26,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T15:48:26,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T15:48:26,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-07T15:48:26,735 WARN [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-07T15:48:26,735 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c2682f91c1e2,38893,1730994505717 2024-11-07T15:48:26,737 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-07T15:48:26,739 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-07T15:48:26,805 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/hbase.id] with ID: 54fed9da-5a5f-49ce-a350-94d6e7a82769 2024-11-07T15:48:26,806 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/.tmp/hbase.id 2024-11-07T15:48:26,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741826_1002 (size=42) 2024-11-07T15:48:26,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741826_1002 (size=42) 2024-11-07T15:48:26,827 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/.tmp/hbase.id]:[hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/hbase.id] 2024-11-07T15:48:26,882 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-07T15:48:26,887 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-07T15:48:26,909 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 
2024-11-07T15:48:26,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:26,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:26,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:26,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741827_1003 (size=196)
2024-11-07T15:48:26,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741827_1003 (size=196)
2024-11-07T15:48:26,958 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-07T15:48:26,960 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-07T15:48:26,982 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-07T15:48:26,987 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-07T15:48:27,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741828_1004 (size=1189)
2024-11-07T15:48:27,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741828_1004 (size=1189)
2024-11-07T15:48:27,056 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store
2024-11-07T15:48:27,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741829_1005 (size=34)
2024-11-07T15:48:27,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741829_1005 (size=34)
2024-11-07T15:48:27,488 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-07T15:48:27,497 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-07T15:48:27,499 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-07T15:48:27,499 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T15:48:27,500 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T15:48:27,503 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-07T15:48:27,504 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T15:48:27,504 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T15:48:27,506 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730994507499Disabling compacts and flushes for region at 1730994507499Disabling writes for close at 1730994507503 (+4 ms)Writing region close event to WAL at 1730994507504 (+1 ms)Closed at 1730994507504
2024-11-07T15:48:27,511 WARN [master/c2682f91c1e2:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/.initializing
2024-11-07T15:48:27,512 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/WALs/c2682f91c1e2,38893,1730994505717
2024-11-07T15:48:27,528 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-07T15:48:27,554 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2682f91c1e2%2C38893%2C1730994505717, suffix=, logDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/WALs/c2682f91c1e2,38893,1730994505717, archiveDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/oldWALs, maxLogs=10
2024-11-07T15:48:27,589 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/WALs/c2682f91c1e2,38893,1730994505717/c2682f91c1e2%2C38893%2C1730994505717.1730994507561, exclude list is [], retry=0
2024-11-07T15:48:27,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40735,DS-9b29c662-46af-4f01-a960-db4bbd90c3b6,DISK]
2024-11-07T15:48:27,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35461,DS-131cfc8f-5016-40bd-99cc-b52dd3fd14ba,DISK]
2024-11-07T15:48:27,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-11-07T15:48:27,666 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/WALs/c2682f91c1e2,38893,1730994505717/c2682f91c1e2%2C38893%2C1730994505717.1730994507561
2024-11-07T15:48:27,667 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39027:39027),(127.0.0.1/127.0.0.1:32777:32777)]
2024-11-07T15:48:27,668 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-07T15:48:27,669 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-07T15:48:27,673 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-07T15:48:27,674 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-07T15:48:27,727 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-07T15:48:27,759 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-07T15:48:27,765 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T15:48:27,768 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-07T15:48:27,769 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-07T15:48:27,773 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-07T15:48:27,774 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T15:48:27,775 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-07T15:48:27,775 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-07T15:48:27,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-07T15:48:27,779 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T15:48:27,780 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-07T15:48:27,781 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-07T15:48:27,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-07T15:48:27,784 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T15:48:27,785 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-07T15:48:27,785 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-07T15:48:27,789 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-07T15:48:27,792 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-07T15:48:27,800 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-07T15:48:27,801 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-07T15:48:27,807 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-07T15:48:27,811 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-07T15:48:27,817 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-07T15:48:27,824 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67994761, jitterRate=0.013200893998146057}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-07T15:48:27,839 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1730994507693Initializing all the Stores at 1730994507697 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730994507698 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730994507699 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730994507699Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730994507699Cleaning up temporary data from old regions at 1730994507801 (+102 ms)Region opened successfully at 1730994507838 (+37 ms)
2024-11-07T15:48:27,848 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-07T15:48:27,903 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@861597d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2682f91c1e2/172.17.0.2:0
2024-11-07T15:48:27,945 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-07T15:48:27,957 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-07T15:48:27,957 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-07T15:48:27,961 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-07T15:48:27,962 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec
2024-11-07T15:48:27,969 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec
2024-11-07T15:48:27,969 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-07T15:48:28,001 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-07T15:48:28,013 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-07T15:48:28,018 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-07T15:48:28,020 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-07T15:48:28,023 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-07T15:48:28,025 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-07T15:48:28,027 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-07T15:48:28,031 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-07T15:48:28,032 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-07T15:48:28,033 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-07T15:48:28,035 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-07T15:48:28,052 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-07T15:48:28,053 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-07T15:48:28,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-07T15:48:28,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-07T15:48:28,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-07T15:48:28,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:28,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:28,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:28,061 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c2682f91c1e2,38893,1730994505717, sessionid=0x101836b79d20000, setting cluster-up flag (Was=false)
2024-11-07T15:48:28,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:28,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:28,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:28,080 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-07T15:48:28,082 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2682f91c1e2,38893,1730994505717
2024-11-07T15:48:28,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:28,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:28,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:28,093 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-07T15:48:28,095 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c2682f91c1e2,38893,1730994505717
2024-11-07T15:48:28,102 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-11-07T15:48:28,132 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.quotas.MasterQuotasObserver loaded, priority=536870911.
2024-11-07T15:48:28,174 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(746): ClusterId : 54fed9da-5a5f-49ce-a350-94d6e7a82769
2024-11-07T15:48:28,177 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(746): ClusterId : 54fed9da-5a5f-49ce-a350-94d6e7a82769
2024-11-07T15:48:28,178 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-07T15:48:28,178 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-07T15:48:28,186 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-07T15:48:28,186 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-07T15:48:28,186 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-07T15:48:28,186 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-07T15:48:28,191 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-07T15:48:28,191 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-07T15:48:28,191 DEBUG [RS:0;c2682f91c1e2:34001 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13405703, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2682f91c1e2/172.17.0.2:0
2024-11-07T15:48:28,191 DEBUG [RS:1;c2682f91c1e2:37185 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70a58960, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c2682f91c1e2/172.17.0.2:0
2024-11-07T15:48:28,204 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-11-07T15:48:28,210 DEBUG [RS:0;c2682f91c1e2:34001 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c2682f91c1e2:34001
2024-11-07T15:48:28,214 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-07T15:48:28,214 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-07T15:48:28,214 DEBUG [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-07T15:48:28,217 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-11-07T15:48:28,218 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(2659): reportForDuty to master=c2682f91c1e2,38893,1730994505717 with port=34001, startcode=1730994506521
2024-11-07T15:48:28,221 DEBUG [RS:1;c2682f91c1e2:37185 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;c2682f91c1e2:37185
2024-11-07T15:48:28,221 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-07T15:48:28,222 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-07T15:48:28,222 DEBUG [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-07T15:48:28,223 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(2659): reportForDuty to master=c2682f91c1e2,38893,1730994505717 with port=37185, startcode=1730994506636
2024-11-07T15:48:28,235 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-07T15:48:28,238 DEBUG [RS:1;c2682f91c1e2:37185 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-07T15:48:28,238 DEBUG [RS:0;c2682f91c1e2:34001 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-07T15:48:28,242 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c2682f91c1e2,38893,1730994505717 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-11-07T15:48:28,252 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c2682f91c1e2:0, corePoolSize=5, maxPoolSize=5
2024-11-07T15:48:28,252 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c2682f91c1e2:0, corePoolSize=5, maxPoolSize=5
2024-11-07T15:48:28,253 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c2682f91c1e2:0, corePoolSize=5, maxPoolSize=5
2024-11-07T15:48:28,253 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c2682f91c1e2:0, corePoolSize=5, maxPoolSize=5
2024-11-07T15:48:28,253 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c2682f91c1e2:0, corePoolSize=10, maxPoolSize=10
2024-11-07T15:48:28,253 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1
2024-11-07T15:48:28,253 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c2682f91c1e2:0, corePoolSize=2, maxPoolSize=2
2024-11-07T15:48:28,254 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1
2024-11-07T15:48:28,264 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1730994538264
2024-11-07T15:48:28,267 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-07T15:48:28,268 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-07T15:48:28,272 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-07T15:48:28,272 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-07T15:48:28,273 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-07T15:48:28,274 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-07T15:48:28,275 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-07T15:48:28,275 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-07T15:48:28,276 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-07T15:48:28,282 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58969, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-11-07T15:48:28,282 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54753, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-11-07T15:48:28,285 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T15:48:28,285 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-07T15:48:28,288 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-07T15:48:28,289 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38893 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
	at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-07T15:48:28,290 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-07T15:48:28,290 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-07T15:48:28,294 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-07T15:48:28,294 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-07T15:48:28,296 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38893 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
	at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-11-07T15:48:28,297 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c2682f91c1e2:0:becomeActiveMaster-HFileCleaner.large.0-1730994508296,5,FailOnTimeoutGroup]
2024-11-07T15:48:28,297 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c2682f91c1e2:0:becomeActiveMaster-HFileCleaner.small.0-1730994508297,5,FailOnTimeoutGroup]
2024-11-07T15:48:28,298 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-07T15:48:28,298 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-07T15:48:28,299 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-07T15:48:28,300 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-07T15:48:28,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741831_1007 (size=1321)
2024-11-07T15:48:28,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741831_1007 (size=1321)
2024-11-07T15:48:28,306 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-07T15:48:28,307 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61
2024-11-07T15:48:28,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741832_1008 (size=32)
2024-11-07T15:48:28,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741832_1008 (size=32)
2024-11-07T15:48:28,323 DEBUG [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-11-07T15:48:28,323 DEBUG [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-11-07T15:48:28,323 WARN [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-11-07T15:48:28,323 WARN [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-11-07T15:48:28,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-07T15:48:28,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-07T15:48:28,334 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-07T15:48:28,334 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T15:48:28,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-07T15:48:28,336 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-07T15:48:28,338 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-07T15:48:28,338 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T15:48:28,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-07T15:48:28,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-07T15:48:28,342 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-07T15:48:28,342 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T15:48:28,343 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-07T15:48:28,343 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-07T15:48:28,346 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-07T15:48:28,346 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-07T15:48:28,347 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-07T15:48:28,348 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-07T15:48:28,350 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740
2024-11-07T15:48:28,352 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740
2024-11-07T15:48:28,356 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-07T15:48:28,356 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-07T15:48:28,357 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-07T15:48:28,359 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-07T15:48:28,365 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-07T15:48:28,367 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73186457, jitterRate=0.09056319296360016}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-07T15:48:28,370 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1730994508325Initializing all the Stores at 1730994508327 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730994508327Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730994508328 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730994508328Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730994508328Cleaning up temporary data from old regions at 1730994508356 (+28 ms)Region opened successfully at 1730994508370 (+14 ms)
2024-11-07T15:48:28,370 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-07T15:48:28,370 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-07T15:48:28,370 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-07T15:48:28,370 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-07T15:48:28,371 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-07T15:48:28,372 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-07T15:48:28,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730994508370Disabling compacts and flushes for region at 1730994508370Disabling writes for close at 1730994508371 (+1 ms)Writing region close event to WAL at 1730994508372 (+1 ms)Closed at 1730994508372
2024-11-07T15:48:28,377 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-07T15:48:28,377 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-07T15:48:28,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-07T15:48:28,393 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-07T15:48:28,396 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-07T15:48:28,425 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(2659): reportForDuty to master=c2682f91c1e2,38893,1730994505717 with port=34001, startcode=1730994506521
2024-11-07T15:48:28,425 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(2659): reportForDuty to master=c2682f91c1e2,38893,1730994505717 with port=37185, startcode=1730994506636
2024-11-07T15:48:28,427 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38893 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c2682f91c1e2,37185,1730994506636
2024-11-07T15:48:28,431 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38893 {}] master.ServerManager(517): Registering regionserver=c2682f91c1e2,37185,1730994506636
2024-11-07T15:48:28,440 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38893 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c2682f91c1e2,34001,1730994506521
2024-11-07T15:48:28,441 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38893 {}] master.ServerManager(517): Registering regionserver=c2682f91c1e2,34001,1730994506521
2024-11-07T15:48:28,441 DEBUG [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61
2024-11-07T15:48:28,441 DEBUG [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44951
2024-11-07T15:48:28,441 DEBUG [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-07T15:48:28,445 DEBUG [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(1440):
Config from master: hbase.rootdir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61 2024-11-07T15:48:28,445 DEBUG [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44951 2024-11-07T15:48:28,445 DEBUG [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-07T15:48:28,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T15:48:28,451 DEBUG [RS:1;c2682f91c1e2:37185 {}] zookeeper.ZKUtil(111): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:28,451 WARN [RS:1;c2682f91c1e2:37185 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-07T15:48:28,451 INFO [RS:1;c2682f91c1e2:37185 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-07T15:48:28,452 DEBUG [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/WALs/c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:28,452 DEBUG [RS:0;c2682f91c1e2:34001 {}] zookeeper.ZKUtil(111): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:28,452 WARN [RS:0;c2682f91c1e2:34001 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-07T15:48:28,452 INFO [RS:0;c2682f91c1e2:34001 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-07T15:48:28,452 DEBUG [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/WALs/c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:28,453 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c2682f91c1e2,37185,1730994506636] 2024-11-07T15:48:28,453 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c2682f91c1e2,34001,1730994506521] 2024-11-07T15:48:28,486 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T15:48:28,486 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-07T15:48:28,503 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T15:48:28,503 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-07T15:48:28,511 INFO [RS:1;c2682f91c1e2:37185 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T15:48:28,511 INFO [RS:0;c2682f91c1e2:34001 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-07T15:48:28,511 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,511 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,513 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T15:48:28,513 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-07T15:48:28,520 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T15:48:28,522 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,522 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-07T15:48:28,522 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,523 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-07T15:48:28,523 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,523 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,523 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,523 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,523 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,523 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,523 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,523 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,524 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c2682f91c1e2:0, corePoolSize=2, maxPoolSize=2 2024-11-07T15:48:28,524 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,524 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c2682f91c1e2:0, corePoolSize=2, maxPoolSize=2 2024-11-07T15:48:28,524 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,524 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,524 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,524 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,524 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,524 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,524 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,525 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,525 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,525 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,525 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,525 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c2682f91c1e2:0, corePoolSize=1, maxPoolSize=1 2024-11-07T15:48:28,525 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c2682f91c1e2:0, corePoolSize=3, maxPoolSize=3 2024-11-07T15:48:28,525 DEBUG [RS:0;c2682f91c1e2:34001 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c2682f91c1e2:0, corePoolSize=3, maxPoolSize=3 2024-11-07T15:48:28,525 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c2682f91c1e2:0, corePoolSize=3, maxPoolSize=3 2024-11-07T15:48:28,526 DEBUG [RS:1;c2682f91c1e2:37185 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c2682f91c1e2:0, corePoolSize=3, maxPoolSize=3 2024-11-07T15:48:28,528 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,528 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,528 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,528 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,528 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,528 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,529 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,529 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=FileSystemUtilizationChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,529 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-11-07T15:48:28,529 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,529 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2682f91c1e2,34001,1730994506521-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T15:48:28,529 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=FileSystemUtilizationChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,529 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,529 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2682f91c1e2,37185,1730994506636-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T15:48:28,547 WARN [c2682f91c1e2:38893 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-07T15:48:28,557 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T15:48:28,560 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2682f91c1e2,34001,1730994506521-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,560 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-07T15:48:28,561 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,561 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=c2682f91c1e2,37185,1730994506636-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,561 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.Replication(171): c2682f91c1e2,34001,1730994506521 started 2024-11-07T15:48:28,561 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,561 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.Replication(171): c2682f91c1e2,37185,1730994506636 started 2024-11-07T15:48:28,585 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,585 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(1482): Serving as c2682f91c1e2,34001,1730994506521, RpcServer on c2682f91c1e2/172.17.0.2:34001, sessionid=0x101836b79d20001 2024-11-07T15:48:28,587 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-07T15:48:28,587 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T15:48:28,587 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(1482): Serving as c2682f91c1e2,37185,1730994506636, RpcServer on c2682f91c1e2/172.17.0.2:37185, sessionid=0x101836b79d20002 2024-11-07T15:48:28,587 DEBUG [RS:0;c2682f91c1e2:34001 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:28,587 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-07T15:48:28,587 DEBUG [RS:1;c2682f91c1e2:37185 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:28,587 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2682f91c1e2,37185,1730994506636' 2024-11-07T15:48:28,587 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2682f91c1e2,34001,1730994506521' 2024-11-07T15:48:28,588 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T15:48:28,588 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-07T15:48:28,590 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T15:48:28,590 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-07T15:48:28,591 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T15:48:28,591 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T15:48:28,591 DEBUG [RS:0;c2682f91c1e2:34001 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:28,591 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2682f91c1e2,34001,1730994506521' 2024-11-07T15:48:28,591 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T15:48:28,591 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-07T15:48:28,591 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-07T15:48:28,591 DEBUG [RS:1;c2682f91c1e2:37185 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:28,592 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c2682f91c1e2,37185,1730994506636' 2024-11-07T15:48:28,592 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-07T15:48:28,593 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under 
znode:'/hbase/online-snapshot/acquired' 2024-11-07T15:48:28,595 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-07T15:48:28,599 DEBUG [RS:0;c2682f91c1e2:34001 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T15:48:28,600 INFO [RS:0;c2682f91c1e2:34001 {}] quotas.RegionServerRpcQuotaManager(68): Initializing RPC quota support 2024-11-07T15:48:28,602 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaRefresherChore, period=1800000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,602 DEBUG [RS:1;c2682f91c1e2:37185 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-07T15:48:28,602 INFO [RS:1;c2682f91c1e2:37185 {}] quotas.RegionServerRpcQuotaManager(68): Initializing RPC quota support 2024-11-07T15:48:28,603 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaRefresherChore, period=1800000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,606 DEBUG [RS:1;c2682f91c1e2:37185 {}] zookeeper.ZKUtil(347): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Unable to get data of znode /hbase/rpc-throttle because node does not exist (not an error) 2024-11-07T15:48:28,606 INFO [RS:1;c2682f91c1e2:37185 {}] quotas.RegionServerRpcQuotaManager(74): Start rpc quota manager and rpc throttle enabled is true 2024-11-07T15:48:28,607 DEBUG [RS:0;c2682f91c1e2:34001 {}] zookeeper.ZKUtil(347): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Unable to get data of znode /hbase/rpc-throttle because node does not exist (not an error) 2024-11-07T15:48:28,607 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=SpaceQuotaRefresherChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,607 INFO [RS:0;c2682f91c1e2:34001 {}] quotas.RegionServerRpcQuotaManager(74): Start rpc quota manager and rpc throttle enabled is true 2024-11-07T15:48:28,607 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=SpaceQuotaRefresherChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,607 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(168): Chore ScheduledChore name=RegionSizeReportingChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:28,607 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(168): Chore ScheduledChore name=RegionSizeReportingChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-07T15:48:28,719 INFO [RS:0;c2682f91c1e2:34001 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-07T15:48:28,719 INFO [RS:1;c2682f91c1e2:37185 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-07T15:48:28,723 INFO [RS:1;c2682f91c1e2:37185 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2682f91c1e2%2C37185%2C1730994506636, suffix=, logDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/WALs/c2682f91c1e2,37185,1730994506636, archiveDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/oldWALs, maxLogs=32 2024-11-07T15:48:28,723 INFO [RS:0;c2682f91c1e2:34001 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2682f91c1e2%2C34001%2C1730994506521, suffix=, logDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/WALs/c2682f91c1e2,34001,1730994506521, archiveDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/oldWALs, maxLogs=32 2024-11-07T15:48:28,742 DEBUG [RS:1;c2682f91c1e2:37185 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/WALs/c2682f91c1e2,37185,1730994506636/c2682f91c1e2%2C37185%2C1730994506636.1730994508727, exclude list is [], retry=0 2024-11-07T15:48:28,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40735,DS-9b29c662-46af-4f01-a960-db4bbd90c3b6,DISK] 2024-11-07T15:48:28,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35461,DS-131cfc8f-5016-40bd-99cc-b52dd3fd14ba,DISK] 2024-11-07T15:48:28,750 DEBUG [RS:0;c2682f91c1e2:34001 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/WALs/c2682f91c1e2,34001,1730994506521/c2682f91c1e2%2C34001%2C1730994506521.1730994508728, exclude list is [], retry=0 2024-11-07T15:48:28,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35461,DS-131cfc8f-5016-40bd-99cc-b52dd3fd14ba,DISK] 2024-11-07T15:48:28,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40735,DS-9b29c662-46af-4f01-a960-db4bbd90c3b6,DISK] 2024-11-07T15:48:28,766 INFO [RS:0;c2682f91c1e2:34001 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/WALs/c2682f91c1e2,34001,1730994506521/c2682f91c1e2%2C34001%2C1730994506521.1730994508728 2024-11-07T15:48:28,768 DEBUG [RS:0;c2682f91c1e2:34001 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39027:39027),(127.0.0.1/127.0.0.1:32777:32777)] 2024-11-07T15:48:28,770 INFO 
[RS:1;c2682f91c1e2:37185 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/WALs/c2682f91c1e2,37185,1730994506636/c2682f91c1e2%2C37185%2C1730994506636.1730994508727 2024-11-07T15:48:28,773 DEBUG [RS:1;c2682f91c1e2:37185 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:32777:32777),(127.0.0.1/127.0.0.1:39027:39027)] 2024-11-07T15:48:28,800 DEBUG [c2682f91c1e2:38893 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=2, allServersCount=2 2024-11-07T15:48:28,809 DEBUG [c2682f91c1e2:38893 {}] balancer.BalancerClusterState(204): Hosts are {c2682f91c1e2=0} racks are {/default-rack=0} 2024-11-07T15:48:28,816 DEBUG [c2682f91c1e2:38893 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-07T15:48:28,816 DEBUG [c2682f91c1e2:38893 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-07T15:48:28,816 DEBUG [c2682f91c1e2:38893 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-07T15:48:28,816 DEBUG [c2682f91c1e2:38893 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-07T15:48:28,816 INFO [c2682f91c1e2:38893 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-07T15:48:28,816 INFO [c2682f91c1e2:38893 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-07T15:48:28,817 DEBUG [c2682f91c1e2:38893 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-07T15:48:28,828 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:28,836 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2682f91c1e2,34001,1730994506521, state=OPENING 2024-11-07T15:48:28,841 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-07T15:48:28,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T15:48:28,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T15:48:28,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T15:48:28,845 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T15:48:28,846 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T15:48:28,846 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T15:48:28,847 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, 
region=1588230740, ASSIGN 2024-11-07T15:48:28,849 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c2682f91c1e2,34001,1730994506521}] 2024-11-07T15:48:29,045 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T15:48:29,047 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52885, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T15:48:29,060 INFO [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-07T15:48:29,060 INFO [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-07T15:48:29,061 INFO [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-07T15:48:29,064 INFO [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c2682f91c1e2%2C34001%2C1730994506521.meta, suffix=.meta, logDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/WALs/c2682f91c1e2,34001,1730994506521, archiveDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/oldWALs, maxLogs=32 2024-11-07T15:48:29,083 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/WALs/c2682f91c1e2,34001,1730994506521/c2682f91c1e2%2C34001%2C1730994506521.meta.1730994509067.meta, exclude list is [], retry=0 2024-11-07T15:48:29,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40735,DS-9b29c662-46af-4f01-a960-db4bbd90c3b6,DISK] 2024-11-07T15:48:29,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35461,DS-131cfc8f-5016-40bd-99cc-b52dd3fd14ba,DISK] 2024-11-07T15:48:29,093 INFO [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/WALs/c2682f91c1e2,34001,1730994506521/c2682f91c1e2%2C34001%2C1730994506521.meta.1730994509067.meta 2024-11-07T15:48:29,093 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:32777:32777),(127.0.0.1/127.0.0.1:39027:39027)] 2024-11-07T15:48:29,093 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-07T15:48:29,095 DEBUG 
[RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-07T15:48:29,098 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-07T15:48:29,100 INFO [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-07T15:48:29,105 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-07T15:48:29,105 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:29,106 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-07T15:48:29,106 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-07T15:48:29,111 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-07T15:48:29,114 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-07T15:48:29,115 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:29,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T15:48:29,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-07T15:48:29,118 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-07T15:48:29,118 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:29,119 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T15:48:29,119 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-07T15:48:29,121 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-07T15:48:29,121 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:29,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T15:48:29,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-07T15:48:29,124 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered 
window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-07T15:48:29,124 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:29,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-07T15:48:29,125 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-07T15:48:29,127 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740 2024-11-07T15:48:29,130 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740 2024-11-07T15:48:29,134 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-07T15:48:29,134 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-07T15:48:29,135 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-07T15:48:29,138 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-07T15:48:29,140 INFO [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75490862, jitterRate=0.12490150332450867}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-07T15:48:29,141 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-07T15:48:29,143 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1730994509106Writing region info on filesystem at 1730994509107 (+1 ms)Initializing all the Stores at 1730994509109 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730994509109Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730994509111 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730994509111Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1730994509111Cleaning up temporary data from old regions at 1730994509134 (+23 ms)Running coprocessor post-open hooks at 1730994509141 (+7 ms)Region opened successfully at 1730994509143 (+2 ms) 2024-11-07T15:48:29,153 INFO [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1730994509035 2024-11-07T15:48:29,171 DEBUG [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-07T15:48:29,171 INFO [RS_OPEN_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-07T15:48:29,174 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, 
regionState=OPEN, openSeqNum=2, regionLocation=c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:29,176 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c2682f91c1e2,34001,1730994506521, state=OPEN 2024-11-07T15:48:29,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T15:48:29,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T15:48:29,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-07T15:48:29,179 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T15:48:29,179 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T15:48:29,179 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-07T15:48:29,180 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:29,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-07T15:48:29,187 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c2682f91c1e2,34001,1730994506521 in 331 msec 2024-11-07T15:48:29,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-07T15:48:29,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 805 msec 2024-11-07T15:48:29,196 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-07T15:48:29,196 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-07T15:48:29,218 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T15:48:29,220 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2682f91c1e2,34001,1730994506521, seqNum=-1] 2024-11-07T15:48:29,244 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T15:48:29,246 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39619, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-07T15:48:29,267 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1260 sec 2024-11-07T15:48:29,268 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1730994509267, completionTime=-1 2024-11-07T15:48:29,270 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=2; waited=0ms, expected min=2 server(s), max=2 server(s), master is running 2024-11-07T15:48:29,271 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-07T15:48:29,298 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=2 2024-11-07T15:48:29,298 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1730994569298 2024-11-07T15:48:29,298 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1730994629298 2024-11-07T15:48:29,298 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-11-07T15:48:29,300 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-07T15:48:29,307 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2682f91c1e2,38893,1730994505717-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:29,308 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2682f91c1e2,38893,1730994505717-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:29,308 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2682f91c1e2,38893,1730994505717-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:29,310 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c2682f91c1e2:38893, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:29,311 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:29,312 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:29,320 DEBUG [master/c2682f91c1e2:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-07T15:48:29,343 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.608sec 2024-11-07T15:48:29,345 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(103): Quota table not found. Creating... 
2024-11-07T15:48:29,346 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.HMaster(2490): Client=null/null create 'hbase:quota', {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T15:48:29,354 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:quota 2024-11-07T15:48:29,356 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] quotas.MasterQuotaManager(107): Initializing quota support 2024-11-07T15:48:29,357 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] namespace.NamespaceStateManager(59): Namespace State Manager started. 2024-11-07T15:48:29,358 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T15:48:29,359 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:29,361 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T15:48:29,374 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] namespace.NamespaceStateManager(222): Finished updating state of 2 namespaces. 2024-11-07T15:48:29,374 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] namespace.NamespaceAuditor(50): NamespaceAuditor started. 2024-11-07T15:48:29,376 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaObserverChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:29,376 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaObserverChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:29,378 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-07T15:48:29,379 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-07T15:48:29,380 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-07T15:48:29,380 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-07T15:48:29,381 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2682f91c1e2,38893,1730994505717-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-07T15:48:29,382 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2682f91c1e2,38893,1730994505717-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-07T15:48:29,388 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-07T15:48:29,389 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64e510e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T15:48:29,389 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-07T15:48:29,390 INFO [master/c2682f91c1e2:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c2682f91c1e2,38893,1730994505717-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-07T15:48:29,392 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-07T15:48:29,392 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-07T15:48:29,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741836_1012 (size=624) 2024-11-07T15:48:29,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741836_1012 (size=624) 2024-11-07T15:48:29,397 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 72d83c7eae51146f5b941b52ec6566e1, NAME => 'hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:quota', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61 2024-11-07T15:48:29,431 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c2682f91c1e2,38893,-1 for getting cluster id 2024-11-07T15:48:29,434 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-07T15:48:29,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35461 is added to blk_1073741837_1013 (size=38) 2024-11-07T15:48:29,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741837_1013 (size=38) 2024-11-07T15:48:29,446 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '54fed9da-5a5f-49ce-a350-94d6e7a82769' 2024-11-07T15:48:29,449 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-07T15:48:29,449 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "54fed9da-5a5f-49ce-a350-94d6e7a82769" 2024-11-07T15:48:29,474 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bdc4fa4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T15:48:29,474 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c2682f91c1e2,38893,-1] 2024-11-07T15:48:29,478 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-07T15:48:29,480 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T15:48:29,482 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57202, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-07T15:48:29,485 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4521b335, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-07T15:48:29,485 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T15:48:29,493 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2682f91c1e2,34001,1730994506521, seqNum=-1] 2024-11-07T15:48:29,493 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T15:48:29,496 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48376, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T15:48:29,527 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c2682f91c1e2,38893,1730994505717 2024-11-07T15:48:29,528 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:29,841 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:29,841 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1722): Closing 
72d83c7eae51146f5b941b52ec6566e1, disabling compactions & flushes 2024-11-07T15:48:29,841 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 2024-11-07T15:48:29,841 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 2024-11-07T15:48:29,841 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. after waiting 0 ms 2024-11-07T15:48:29,841 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 2024-11-07T15:48:29,841 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1973): Closed hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 2024-11-07T15:48:29,842 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1676): Region close journal for 72d83c7eae51146f5b941b52ec6566e1: Waiting for close lock at 1730994509841Disabling compacts and flushes for region at 1730994509841Disabling writes for close at 1730994509841Writing region close event to WAL at 1730994509841Closed at 1730994509841 2024-11-07T15:48:29,844 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T15:48:29,851 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1.","families":{"info":[{"qualifier":"regioninfo","vlen":37,"tag":[],"timestamp":"1730994509845"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730994509845"}]},"ts":"1730994509845"} 2024-11-07T15:48:29,857 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
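The MetaTableAccessor Put above writes the new quota region's info:regioninfo and info:state cells into hbase:meta. As a hedged illustration only (not taken from the test source; the Connection handle is assumed), those region-state cells can be read back with the plain client API:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaRegionStateSketch {
      // Prints the info:state cell of every hbase:meta row belonging to the named table.
      static void dumpRegionStates(Connection conn, String tableName) throws IOException {
        byte[] info = Bytes.toBytes("info");
        byte[] state = Bytes.toBytes("state");
        byte[] prefix = Bytes.toBytes(tableName + ",");   // region rows are keyed "<table>,<startKey>,<id>."
        Scan scan = new Scan().withStartRow(prefix).addColumn(info, state);
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner results = meta.getScanner(scan)) {
          for (Result r : results) {
            if (!Bytes.startsWith(r.getRow(), prefix)) {
              break;                                      // ran past this table's region rows
            }
            System.out.println(Bytes.toString(r.getRow())
                + "  state=" + Bytes.toString(r.getValue(info, state)));
          }
        }
      }
    }

For example, dumpRegionStates(conn, "hbase:quota") at this point in the run would show the single region row transitioning from OPENING to OPEN, matching the RegionStateStore updates that follow.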
2024-11-07T15:48:29,859 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T15:48:29,862 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:quota","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994509859"}]},"ts":"1730994509859"} 2024-11-07T15:48:29,866 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:quota, state=ENABLING in hbase:meta 2024-11-07T15:48:29,867 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {c2682f91c1e2=0} racks are {/default-rack=0} 2024-11-07T15:48:29,869 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-07T15:48:29,869 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-07T15:48:29,869 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-07T15:48:29,869 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-07T15:48:29,869 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-07T15:48:29,869 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-07T15:48:29,869 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-07T15:48:29,871 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=72d83c7eae51146f5b941b52ec6566e1, ASSIGN}] 2024-11-07T15:48:29,873 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=72d83c7eae51146f5b941b52ec6566e1, ASSIGN 2024-11-07T15:48:29,875 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:quota, region=72d83c7eae51146f5b941b52ec6566e1, ASSIGN; state=OFFLINE, location=c2682f91c1e2,37185,1730994506636; forceNewPlan=false, retain=false 2024-11-07T15:48:30,028 INFO [c2682f91c1e2:38893 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-07T15:48:30,029 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=72d83c7eae51146f5b941b52ec6566e1, regionState=OPENING, regionLocation=c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:30,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:quota, region=72d83c7eae51146f5b941b52ec6566e1, ASSIGN because future has completed 2024-11-07T15:48:30,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 72d83c7eae51146f5b941b52ec6566e1, server=c2682f91c1e2,37185,1730994506636}] 2024-11-07T15:48:30,191 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-07T15:48:30,193 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50737, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-07T15:48:30,199 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 2024-11-07T15:48:30,200 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 72d83c7eae51146f5b941b52ec6566e1, NAME => 'hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1.', STARTKEY => '', ENDKEY => ''} 2024-11-07T15:48:30,200 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table quota 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:30,200 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:30,201 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:30,201 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:30,203 INFO [StoreOpener-72d83c7eae51146f5b941b52ec6566e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family q of region 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:30,205 INFO [StoreOpener-72d83c7eae51146f5b941b52ec6566e1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72d83c7eae51146f5b941b52ec6566e1 columnFamilyName q 2024-11-07T15:48:30,205 DEBUG [StoreOpener-72d83c7eae51146f5b941b52ec6566e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:30,206 INFO [StoreOpener-72d83c7eae51146f5b941b52ec6566e1-1 {}] regionserver.HStore(327): Store=72d83c7eae51146f5b941b52ec6566e1/q, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T15:48:30,207 INFO [StoreOpener-72d83c7eae51146f5b941b52ec6566e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family u of region 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:30,209 INFO [StoreOpener-72d83c7eae51146f5b941b52ec6566e1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72d83c7eae51146f5b941b52ec6566e1 columnFamilyName u 2024-11-07T15:48:30,209 DEBUG [StoreOpener-72d83c7eae51146f5b941b52ec6566e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:30,209 INFO [StoreOpener-72d83c7eae51146f5b941b52ec6566e1-1 {}] regionserver.HStore(327): Store=72d83c7eae51146f5b941b52ec6566e1/u, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T15:48:30,210 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:30,211 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:30,212 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:30,213 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:30,213 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:30,214 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:quota descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-11-07T15:48:30,216 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:30,220 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T15:48:30,221 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 72d83c7eae51146f5b941b52ec6566e1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73683215, jitterRate=0.09796546399593353}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-07T15:48:30,221 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:30,222 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 72d83c7eae51146f5b941b52ec6566e1: Running coprocessor pre-open hook at 1730994510201Writing region info on filesystem at 1730994510201Initializing all the Stores at 1730994510203 (+2 ms)Instantiating store for column family {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730994510203Instantiating store for column family {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730994510203Cleaning up temporary data from old regions at 1730994510213 (+10 ms)Running coprocessor post-open hooks at 1730994510221 (+8 ms)Region opened successfully at 1730994510222 (+1 ms) 2024-11-07T15:48:30,224 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1., pid=6, 
masterSystemTime=1730994510190 2024-11-07T15:48:30,228 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 2024-11-07T15:48:30,228 INFO [RS_OPEN_PRIORITY_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 2024-11-07T15:48:30,229 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=72d83c7eae51146f5b941b52ec6566e1, regionState=OPEN, openSeqNum=2, regionLocation=c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:30,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 72d83c7eae51146f5b941b52ec6566e1, server=c2682f91c1e2,37185,1730994506636 because future has completed 2024-11-07T15:48:30,239 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-07T15:48:30,239 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 72d83c7eae51146f5b941b52ec6566e1, server=c2682f91c1e2,37185,1730994506636 in 199 msec 2024-11-07T15:48:30,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-07T15:48:30,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=72d83c7eae51146f5b941b52ec6566e1, ASSIGN in 368 msec 2024-11-07T15:48:30,246 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T15:48:30,246 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:quota","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994510246"}]},"ts":"1730994510246"} 2024-11-07T15:48:30,251 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:quota, state=ENABLED in hbase:meta 2024-11-07T15:48:30,253 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T15:48:30,255 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=hbase:quota,, stopping at row=hbase:quota ,, for max=2147483647 with caching=100 2024-11-07T15:48:30,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:quota in 906 msec 2024-11-07T15:48:30,263 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T15:48:30,268 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.AsyncConnectionImpl(321): The fetched master address is c2682f91c1e2,38893,1730994505717 2024-11-07T15:48:30,270 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5dcc718c 
2024-11-07T15:48:30,273 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T15:48:30,275 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34046, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-07T15:48:30,279 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=hbase:quota,, stopping at row=hbase:quota ,, for max=2147483647 with caching=100 2024-11-07T15:48:30,284 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T15:48:30,287 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35390, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T15:48:30,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin0', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T15:48:30,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin0 2024-11-07T15:48:30,305 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T15:48:30,307 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:30,307 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin0" procId is: 7 2024-11-07T15:48:30,309 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T15:48:30,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-07T15:48:30,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741838_1014 (size=391) 2024-11-07T15:48:30,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741838_1014 (size=391) 2024-11-07T15:48:30,325 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 79945045cac83aa4484718cd7ff8f1c3, NAME => 'TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin0', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61 2024-11-07T15:48:30,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741839_1015 (size=50) 2024-11-07T15:48:30,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741839_1015 (size=50) 2024-11-07T15:48:30,338 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:30,339 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1722): Closing 79945045cac83aa4484718cd7ff8f1c3, disabling compactions & flushes 2024-11-07T15:48:30,339 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. 2024-11-07T15:48:30,339 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. 2024-11-07T15:48:30,339 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. after waiting 0 ms 2024-11-07T15:48:30,339 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. 2024-11-07T15:48:30,339 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. 2024-11-07T15:48:30,339 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1676): Region close journal for 79945045cac83aa4484718cd7ff8f1c3: Waiting for close lock at 1730994510338Disabling compacts and flushes for region at 1730994510338Disabling writes for close at 1730994510339 (+1 ms)Writing region close event to WAL at 1730994510339Closed at 1730994510339 2024-11-07T15:48:30,341 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T15:48:30,342 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1730994510341"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730994510341"}]},"ts":"1730994510341"} 2024-11-07T15:48:30,345 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
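The create 'TestQuotaAdmin0' request logged above carries REGION_REPLICATION => '1' and a single 'cf' family. A rough client-side equivalent using the asynchronous admin (which the later RawAsyncHBaseAdmin completion record suggests is in play) might look like the following sketch; this is not the test's actual code, and the site configuration on the classpath is assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestQuotaAdmin0Sketch {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn =
                 ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          AsyncAdmin admin = conn.getAdmin();
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestQuotaAdmin0"))
              .setRegionReplication(1)                                   // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1).build())
              .build();
          // The returned future completes once the master reports the CreateTableProcedure done,
          // which is what the repeated "Checking to see if procedure is done pid=7" records reflect.
          admin.createTable(desc).get();
        }
      }
    }

The master side then drives the same CREATE_TABLE_* state machine visible in the log (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE, POST_OPERATION).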
2024-11-07T15:48:30,347 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T15:48:30,348 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994510347"}]},"ts":"1730994510347"} 2024-11-07T15:48:30,360 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=ENABLING in hbase:meta 2024-11-07T15:48:30,360 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {c2682f91c1e2=0} racks are {/default-rack=0} 2024-11-07T15:48:30,362 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-07T15:48:30,362 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-07T15:48:30,362 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-07T15:48:30,362 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-07T15:48:30,362 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-07T15:48:30,362 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-07T15:48:30,362 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-07T15:48:30,363 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=79945045cac83aa4484718cd7ff8f1c3, ASSIGN}] 2024-11-07T15:48:30,365 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=79945045cac83aa4484718cd7ff8f1c3, ASSIGN 2024-11-07T15:48:30,367 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=79945045cac83aa4484718cd7ff8f1c3, ASSIGN; state=OFFLINE, location=c2682f91c1e2,37185,1730994506636; forceNewPlan=false, retain=false 2024-11-07T15:48:30,518 INFO [c2682f91c1e2:38893 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-07T15:48:30,518 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=79945045cac83aa4484718cd7ff8f1c3, regionState=OPENING, regionLocation=c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:30,522 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=79945045cac83aa4484718cd7ff8f1c3, ASSIGN because future has completed 2024-11-07T15:48:30,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 79945045cac83aa4484718cd7ff8f1c3, server=c2682f91c1e2,37185,1730994506636}] 2024-11-07T15:48:30,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-07T15:48:30,683 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. 2024-11-07T15:48:30,683 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 79945045cac83aa4484718cd7ff8f1c3, NAME => 'TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3.', STARTKEY => '', ENDKEY => ''} 2024-11-07T15:48:30,684 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin0 79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:30,684 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:30,684 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:30,684 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:30,686 INFO [StoreOpener-79945045cac83aa4484718cd7ff8f1c3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:30,689 INFO [StoreOpener-79945045cac83aa4484718cd7ff8f1c3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 79945045cac83aa4484718cd7ff8f1c3 columnFamilyName cf 2024-11-07T15:48:30,689 DEBUG [StoreOpener-79945045cac83aa4484718cd7ff8f1c3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:30,689 INFO [StoreOpener-79945045cac83aa4484718cd7ff8f1c3-1 {}] regionserver.HStore(327): Store=79945045cac83aa4484718cd7ff8f1c3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T15:48:30,690 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:30,691 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:30,691 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:30,692 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:30,692 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:30,695 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:30,698 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T15:48:30,699 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 79945045cac83aa4484718cd7ff8f1c3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63988836, jitterRate=-0.046492040157318115}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T15:48:30,699 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:30,700 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 79945045cac83aa4484718cd7ff8f1c3: Running coprocessor pre-open hook at 1730994510684Writing region info on filesystem at 1730994510684Initializing all the Stores at 1730994510686 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730994510686Cleaning up temporary data from old regions at 1730994510692 (+6 ms)Running coprocessor post-open hooks at 1730994510699 (+7 ms)Region opened successfully at 1730994510700 (+1 ms) 2024-11-07T15:48:30,702 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., pid=9, masterSystemTime=1730994510677 2024-11-07T15:48:30,705 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. 2024-11-07T15:48:30,705 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. 2024-11-07T15:48:30,706 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=79945045cac83aa4484718cd7ff8f1c3, regionState=OPEN, openSeqNum=2, regionLocation=c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:30,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 79945045cac83aa4484718cd7ff8f1c3, server=c2682f91c1e2,37185,1730994506636 because future has completed 2024-11-07T15:48:30,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-07T15:48:30,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 79945045cac83aa4484718cd7ff8f1c3, server=c2682f91c1e2,37185,1730994506636 in 188 msec 2024-11-07T15:48:30,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-07T15:48:30,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=79945045cac83aa4484718cd7ff8f1c3, ASSIGN in 352 msec 2024-11-07T15:48:30,720 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T15:48:30,720 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994510720"}]},"ts":"1730994510720"} 2024-11-07T15:48:30,724 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=ENABLED in hbase:meta 2024-11-07T15:48:30,725 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T15:48:30,728 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin0 in 424 msec 2024-11-07T15:48:31,080 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-07T15:48:31,081 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin0 completed 2024-11-07T15:48:31,081 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin0 get assigned. Timeout = 60000ms 2024-11-07T15:48:31,082 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:31,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin0 assigned to meta. Checking AM states. 2024-11-07T15:48:31,087 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:31,088 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin0 assigned. 2024-11-07T15:48:31,088 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:31,090 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin0,, stopping at row=TestQuotaAdmin0 ,, for max=2147483647 with caching=100 2024-11-07T15:48:31,095 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin0,, stopping at row=TestQuotaAdmin0 ,, for max=2147483647 with caching=100 2024-11-07T15:48:31,122 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T15:48:31,123 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] client.AsyncConnectionImpl(321): The fetched master address is c2682f91c1e2,38893,1730994505717 2024-11-07T15:48:31,123 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5b5ae9f8 2024-11-07T15:48:31,126 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin1', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T15:48:31,128 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T15:48:31,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin1 2024-11-07T15:48:31,130 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46355, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=MasterService 2024-11-07T15:48:31,131 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T15:48:31,132 DEBUG [PEWorker-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:31,133 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin1" procId is: 10 2024-11-07T15:48:31,135 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T15:48:31,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-11-07T15:48:31,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741840_1016 (size=391) 2024-11-07T15:48:31,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741840_1016 (size=391) 2024-11-07T15:48:31,157 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T15:48:31,157 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2682f91c1e2,34001,1730994506521, seqNum=-1] 2024-11-07T15:48:31,157 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T15:48:31,159 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 693706afa3a03665aa040eecbb09fb84, NAME => 'TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin1', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61 2024-11-07T15:48:31,159 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43305, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-11-07T15:48:31,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.default', locateType=CURRENT is [region=hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1., hostname=c2682f91c1e2,37185,1730994506636, seqNum=2] 2024-11-07T15:48:31,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741841_1017 (size=50) 2024-11-07T15:48:31,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741841_1017 (size=50) 2024-11-07T15:48:31,171 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:31,171 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1722): Closing 693706afa3a03665aa040eecbb09fb84, disabling compactions & flushes 2024-11-07T15:48:31,171 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. 2024-11-07T15:48:31,171 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. 2024-11-07T15:48:31,171 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. after waiting 0 ms 2024-11-07T15:48:31,171 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. 2024-11-07T15:48:31,171 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. 2024-11-07T15:48:31,171 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 693706afa3a03665aa040eecbb09fb84: Waiting for close lock at 1730994511171Disabling compacts and flushes for region at 1730994511171Disabling writes for close at 1730994511171Writing region close event to WAL at 1730994511171Closed at 1730994511171 2024-11-07T15:48:31,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T15:48:31,181 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T15:48:31,181 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54023, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-11-07T15:48:31,181 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1730994511181"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730994511181"}]},"ts":"1730994511181"} 2024-11-07T15:48:31,185 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-07T15:48:31,187 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T15:48:31,187 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994511187"}]},"ts":"1730994511187"} 2024-11-07T15:48:31,190 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=ENABLING in hbase:meta 2024-11-07T15:48:31,191 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {c2682f91c1e2=0} racks are {/default-rack=0} 2024-11-07T15:48:31,192 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-07T15:48:31,192 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-07T15:48:31,192 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-07T15:48:31,192 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-07T15:48:31,192 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-07T15:48:31,192 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-07T15:48:31,192 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-07T15:48:31,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=693706afa3a03665aa040eecbb09fb84, ASSIGN}] 2024-11-07T15:48:31,194 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=693706afa3a03665aa040eecbb09fb84, ASSIGN 2024-11-07T15:48:31,200 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=693706afa3a03665aa040eecbb09fb84, ASSIGN; state=OFFLINE, location=c2682f91c1e2,34001,1730994506521; forceNewPlan=false, retain=false 2024-11-07T15:48:31,351 INFO [c2682f91c1e2:38893 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
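Once a create completes, the harness waits for the new regions to actually be assigned, as in the earlier "Waiting until all regions of table TestQuotaAdmin0 get assigned. Timeout = 60000ms" records, and the assignment records show each region landing on a specific region server. A hedged sketch of an equivalent check, assuming a running HBaseTestingUtil handle named util and a client Connection named conn (both names are placeholders, not from the test source):

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class AssignmentCheckSketch {
      static void assertAssigned(HBaseTestingUtil util, Connection conn, String name) throws Exception {
        TableName table = TableName.valueOf(name);
        util.waitUntilAllRegionsAssigned(table, 60_000);   // mirrors the 60000ms timeout in the log
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }

In this run, such a check would report TestQuotaAdmin0's single region on c2682f91c1e2,37185,... and, once the procedure below finishes, TestQuotaAdmin1's region on c2682f91c1e2,34001,..., matching the RegionStateStore updates in the log.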
2024-11-07T15:48:31,351 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=693706afa3a03665aa040eecbb09fb84, regionState=OPENING, regionLocation=c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:31,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=693706afa3a03665aa040eecbb09fb84, ASSIGN because future has completed 2024-11-07T15:48:31,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 693706afa3a03665aa040eecbb09fb84, server=c2682f91c1e2,34001,1730994506521}] 2024-11-07T15:48:31,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-11-07T15:48:31,515 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. 2024-11-07T15:48:31,516 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 693706afa3a03665aa040eecbb09fb84, NAME => 'TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84.', STARTKEY => '', ENDKEY => ''} 2024-11-07T15:48:31,516 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin1 693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:31,516 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:31,517 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:31,517 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:31,519 INFO [StoreOpener-693706afa3a03665aa040eecbb09fb84-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:31,522 INFO [StoreOpener-693706afa3a03665aa040eecbb09fb84-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 693706afa3a03665aa040eecbb09fb84 columnFamilyName cf 2024-11-07T15:48:31,522 DEBUG [StoreOpener-693706afa3a03665aa040eecbb09fb84-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:31,523 INFO [StoreOpener-693706afa3a03665aa040eecbb09fb84-1 {}] regionserver.HStore(327): Store=693706afa3a03665aa040eecbb09fb84/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T15:48:31,523 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:31,524 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin1/693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:31,525 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin1/693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:31,526 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:31,526 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:31,528 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:31,532 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin1/693706afa3a03665aa040eecbb09fb84/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T15:48:31,532 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 693706afa3a03665aa040eecbb09fb84; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74411460, jitterRate=0.10881716012954712}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T15:48:31,533 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:31,533 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 693706afa3a03665aa040eecbb09fb84: Running coprocessor pre-open hook at 1730994511517Writing region info on filesystem at 1730994511517Initializing all the Stores at 1730994511518 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730994511518Cleaning up temporary data from old regions at 1730994511526 (+8 ms)Running coprocessor post-open hooks at 1730994511533 (+7 ms)Region opened successfully at 1730994511533 2024-11-07T15:48:31,535 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84., pid=12, masterSystemTime=1730994511510 2024-11-07T15:48:31,538 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. 2024-11-07T15:48:31,538 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. 2024-11-07T15:48:31,539 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=693706afa3a03665aa040eecbb09fb84, regionState=OPEN, openSeqNum=2, regionLocation=c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:31,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 693706afa3a03665aa040eecbb09fb84, server=c2682f91c1e2,34001,1730994506521 because future has completed 2024-11-07T15:48:31,548 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-07T15:48:31,548 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 693706afa3a03665aa040eecbb09fb84, server=c2682f91c1e2,34001,1730994506521 in 188 msec 2024-11-07T15:48:31,552 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-11-07T15:48:31,552 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=693706afa3a03665aa040eecbb09fb84, ASSIGN in 356 msec 2024-11-07T15:48:31,553 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T15:48:31,554 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994511553"}]},"ts":"1730994511553"} 2024-11-07T15:48:31,557 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=ENABLED in hbase:meta 2024-11-07T15:48:31,558 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T15:48:31,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin1 in 432 msec 
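For reference, the flow traced above (CreateTableProcedure pid=10 for TestQuotaAdmin1, and the identical pid=13 flow for TestQuotaAdmin2 below) is what the master runs for an ordinary client-side Admin.createTable call. The following is only a minimal illustrative sketch, not part of the recorded test: the table and column family names are taken from the log, the connection setup and class/method names are the standard HBase 2.x+ client API, and column-family tuning (e.g. the BLOOMFILTER => 'NONE' visible in the logged descriptor) is left at client defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateQuotaAdminTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Single column family 'cf', as in the descriptor printed by the
      // CreateTableProcedure entries above (other family settings omitted here).
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestQuotaAdmin1"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // Blocks until the master-side CreateTableProcedure and its ASSIGN
      // subprocedures finish, i.e. the "Finished pid=..., state=SUCCESS ...;
      // CreateTableProcedure table=TestQuotaAdmin1" stage seen in the log.
      admin.createTable(desc);
    }
  }
}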
2024-11-07T15:48:31,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-11-07T15:48:31,900 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin1 completed 2024-11-07T15:48:31,901 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin1 get assigned. Timeout = 60000ms 2024-11-07T15:48:31,901 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:31,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin1 assigned to meta. Checking AM states. 2024-11-07T15:48:31,907 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:31,907 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin1 assigned. 2024-11-07T15:48:31,907 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:31,910 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin1,, stopping at row=TestQuotaAdmin1 ,, for max=2147483647 with caching=100 2024-11-07T15:48:31,916 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin1,, stopping at row=TestQuotaAdmin1 ,, for max=2147483647 with caching=100 2024-11-07T15:48:31,920 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-07T15:48:31,921 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] client.AsyncConnectionImpl(321): The fetched master address is c2682f91c1e2,38893,1730994505717 2024-11-07T15:48:31,921 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@77816a16 2024-11-07T15:48:31,921 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-07T15:48:31,922 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42915, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=MasterService 2024-11-07T15:48:31,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin2', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T15:48:31,925 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-07T15:48:31,925 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c2682f91c1e2,34001,1730994506521, seqNum=-1] 2024-11-07T15:48:31,925 DEBUG [regionserver/c2682f91c1e2:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication 
for service=ClientService, sasl=false 2024-11-07T15:48:31,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin2 2024-11-07T15:48:31,927 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37485, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-07T15:48:31,927 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T15:48:31,928 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:31,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin2" procId is: 13 2024-11-07T15:48:31,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-07T15:48:31,931 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T15:48:31,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.default', locateType=CURRENT is [region=hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1., hostname=c2682f91c1e2,37185,1730994506636, seqNum=2] 2024-11-07T15:48:31,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T15:48:31,933 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40803, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-07T15:48:31,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741842_1018 (size=391) 2024-11-07T15:48:31,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741842_1018 (size=391) 2024-11-07T15:48:31,951 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b60425053afc90dabdcdd8551fa12ad1, NAME => 'TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin2', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61 2024-11-07T15:48:31,962 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741843_1019 (size=50) 2024-11-07T15:48:31,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741843_1019 (size=50) 2024-11-07T15:48:31,963 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:31,964 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1722): Closing b60425053afc90dabdcdd8551fa12ad1, disabling compactions & flushes 2024-11-07T15:48:31,964 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. 2024-11-07T15:48:31,964 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. 2024-11-07T15:48:31,964 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. after waiting 0 ms 2024-11-07T15:48:31,964 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. 2024-11-07T15:48:31,964 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. 2024-11-07T15:48:31,964 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1676): Region close journal for b60425053afc90dabdcdd8551fa12ad1: Waiting for close lock at 1730994511964Disabling compacts and flushes for region at 1730994511964Disabling writes for close at 1730994511964Writing region close event to WAL at 1730994511964Closed at 1730994511964 2024-11-07T15:48:31,968 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T15:48:31,968 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1730994511968"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730994511968"}]},"ts":"1730994511968"} 2024-11-07T15:48:31,971 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-07T15:48:31,973 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T15:48:31,973 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994511973"}]},"ts":"1730994511973"} 2024-11-07T15:48:31,976 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=ENABLING in hbase:meta 2024-11-07T15:48:31,976 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {c2682f91c1e2=0} racks are {/default-rack=0} 2024-11-07T15:48:31,978 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-07T15:48:31,978 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-07T15:48:31,978 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-07T15:48:31,978 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-07T15:48:31,978 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-07T15:48:31,978 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-07T15:48:31,978 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-07T15:48:31,978 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=b60425053afc90dabdcdd8551fa12ad1, ASSIGN}] 2024-11-07T15:48:31,980 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=b60425053afc90dabdcdd8551fa12ad1, ASSIGN 2024-11-07T15:48:31,982 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=b60425053afc90dabdcdd8551fa12ad1, ASSIGN; state=OFFLINE, location=c2682f91c1e2,34001,1730994506521; forceNewPlan=false, retain=false 2024-11-07T15:48:32,133 INFO [c2682f91c1e2:38893 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-07T15:48:32,133 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=14 updating hbase:meta row=b60425053afc90dabdcdd8551fa12ad1, regionState=OPENING, regionLocation=c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:32,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=b60425053afc90dabdcdd8551fa12ad1, ASSIGN because future has completed 2024-11-07T15:48:32,137 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; OpenRegionProcedure b60425053afc90dabdcdd8551fa12ad1, server=c2682f91c1e2,34001,1730994506521}] 2024-11-07T15:48:32,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-07T15:48:32,301 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(132): Open TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. 2024-11-07T15:48:32,301 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7752): Opening region: {ENCODED => b60425053afc90dabdcdd8551fa12ad1, NAME => 'TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1.', STARTKEY => '', ENDKEY => ''} 2024-11-07T15:48:32,302 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin2 b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:32,302 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(898): Instantiated TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:32,302 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7794): checking encryption for b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:32,302 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7797): checking classloading for b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:32,305 INFO [StoreOpener-b60425053afc90dabdcdd8551fa12ad1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:32,307 INFO [StoreOpener-b60425053afc90dabdcdd8551fa12ad1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b60425053afc90dabdcdd8551fa12ad1 columnFamilyName cf 2024-11-07T15:48:32,307 DEBUG [StoreOpener-b60425053afc90dabdcdd8551fa12ad1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:32,309 INFO [StoreOpener-b60425053afc90dabdcdd8551fa12ad1-1 {}] regionserver.HStore(327): Store=b60425053afc90dabdcdd8551fa12ad1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T15:48:32,309 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1038): replaying wal for b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:32,310 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin2/b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:32,311 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin2/b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:32,312 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1048): stopping wal replay for b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:32,312 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1060): Cleaning up temporary data for b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:32,315 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1093): writing seq id for b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:32,322 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin2/b60425053afc90dabdcdd8551fa12ad1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T15:48:32,323 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1114): Opened b60425053afc90dabdcdd8551fa12ad1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63876968, jitterRate=-0.048159003257751465}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T15:48:32,323 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:32,324 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1006): Region open journal for b60425053afc90dabdcdd8551fa12ad1: Running coprocessor pre-open hook at 1730994512303Writing region info on filesystem at 1730994512303Initializing all the Stores at 1730994512304 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730994512304Cleaning up temporary data from old regions at 1730994512312 (+8 ms)Running coprocessor post-open hooks at 1730994512323 (+11 ms)Region opened successfully at 1730994512323 2024-11-07T15:48:32,325 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1., pid=15, masterSystemTime=1730994512294 2024-11-07T15:48:32,330 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. 2024-11-07T15:48:32,330 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. 2024-11-07T15:48:32,331 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=14 updating hbase:meta row=b60425053afc90dabdcdd8551fa12ad1, regionState=OPEN, openSeqNum=2, regionLocation=c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:32,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=15, ppid=14, state=RUNNABLE, hasLock=false; OpenRegionProcedure b60425053afc90dabdcdd8551fa12ad1, server=c2682f91c1e2,34001,1730994506521 because future has completed 2024-11-07T15:48:32,343 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-11-07T15:48:32,343 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; OpenRegionProcedure b60425053afc90dabdcdd8551fa12ad1, server=c2682f91c1e2,34001,1730994506521 in 201 msec 2024-11-07T15:48:32,347 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-07T15:48:32,347 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=b60425053afc90dabdcdd8551fa12ad1, ASSIGN in 365 msec 2024-11-07T15:48:32,349 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T15:48:32,349 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994512349"}]},"ts":"1730994512349"} 2024-11-07T15:48:32,352 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=ENABLED in hbase:meta 2024-11-07T15:48:32,354 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T15:48:32,357 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin2 in 430 msec 
2024-11-07T15:48:32,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-07T15:48:32,700 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin2 completed 2024-11-07T15:48:32,700 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin2 get assigned. Timeout = 60000ms 2024-11-07T15:48:32,701 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:32,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin2 assigned to meta. Checking AM states. 2024-11-07T15:48:32,705 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:32,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin2 assigned. 2024-11-07T15:48:32,706 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:32,708 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin2,, stopping at row=TestQuotaAdmin2 ,, for max=2147483647 with caching=100 2024-11-07T15:48:32,712 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin2,, stopping at row=TestQuotaAdmin2 ,, for max=2147483647 with caching=100 2024-11-07T15:48:32,721 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$18(3529): Client=jenkins//172.17.0.2 creating {NAME => 'TestNs'} 2024-11-07T15:48:32,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=16, state=RUNNABLE:CREATE_NAMESPACE_PREPARE, hasLock=false; CreateNamespaceProcedure, namespace=TestNs 2024-11-07T15:48:32,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=16 2024-11-07T15:48:32,739 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, state=SUCCESS, hasLock=false; CreateNamespaceProcedure, namespace=TestNs in 16 msec 2024-11-07T15:48:32,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=16 2024-11-07T15:48:32,991 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$NamespaceProcedureBiConsumer(2745): Operation: CREATE_NAMESPACE, Namespace: TestNs completed 2024-11-07T15:48:32,992 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestNs:TestTable', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-07T15:48:32,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=17, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestNs:TestTable 2024-11-07T15:48:32,996 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, 
state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_PRE_OPERATION 2024-11-07T15:48:32,996 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:32,996 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "TestNs" qualifier: "TestTable" procId is: 17 2024-11-07T15:48:32,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-11-07T15:48:32,998 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-07T15:48:33,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741844_1020 (size=358) 2024-11-07T15:48:33,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741844_1020 (size=358) 2024-11-07T15:48:33,017 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 448b38f9959837a3b8eeeee19aa6a85d, NAME => 'TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='TestNs:TestTable', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61 2024-11-07T15:48:33,017 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 7319af31ce83708d5bbd35fc3fa17e8e, NAME => 'TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='TestNs:TestTable', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61 2024-11-07T15:48:33,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741846_1022 (size=44) 2024-11-07T15:48:33,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741846_1022 (size=44) 2024-11-07T15:48:33,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741845_1021 (size=44) 2024-11-07T15:48:33,042 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] 
regionserver.HRegion(898): Instantiated TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:33,043 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1722): Closing 7319af31ce83708d5bbd35fc3fa17e8e, disabling compactions & flushes 2024-11-07T15:48:33,043 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1755): Closing region TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. 2024-11-07T15:48:33,043 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. 2024-11-07T15:48:33,043 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. after waiting 0 ms 2024-11-07T15:48:33,043 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. 2024-11-07T15:48:33,043 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1973): Closed TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. 2024-11-07T15:48:33,043 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1676): Region close journal for 7319af31ce83708d5bbd35fc3fa17e8e: Waiting for close lock at 1730994513043Disabling compacts and flushes for region at 1730994513043Disabling writes for close at 1730994513043Writing region close event to WAL at 1730994513043Closed at 1730994513043 2024-11-07T15:48:33,044 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(898): Instantiated TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:33,044 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1722): Closing 448b38f9959837a3b8eeeee19aa6a85d, disabling compactions & flushes 2024-11-07T15:48:33,044 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1755): Closing region TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. 2024-11-07T15:48:33,044 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. 2024-11-07T15:48:33,045 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. after waiting 0 ms 2024-11-07T15:48:33,045 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. 2024-11-07T15:48:33,045 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1973): Closed TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. 
2024-11-07T15:48:33,045 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1676): Region close journal for 448b38f9959837a3b8eeeee19aa6a85d: Waiting for close lock at 1730994513044Disabling compacts and flushes for region at 1730994513044Disabling writes for close at 1730994513045 (+1 ms)Writing region close event to WAL at 1730994513045Closed at 1730994513045 2024-11-07T15:48:33,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741845_1021 (size=44) 2024-11-07T15:48:33,047 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_ADD_TO_META 2024-11-07T15:48:33,047 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1730994513047"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730994513047"}]},"ts":"1730994513047"} 2024-11-07T15:48:33,047 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1730994513047"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1730994513047"}]},"ts":"1730994513047"} 2024-11-07T15:48:33,054 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-07T15:48:33,056 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-07T15:48:33,057 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994513056"}]},"ts":"1730994513056"} 2024-11-07T15:48:33,059 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=ENABLING in hbase:meta 2024-11-07T15:48:33,060 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {c2682f91c1e2=0} racks are {/default-rack=0} 2024-11-07T15:48:33,062 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-07T15:48:33,062 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-07T15:48:33,062 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-07T15:48:33,062 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-07T15:48:33,063 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-07T15:48:33,063 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-07T15:48:33,063 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-07T15:48:33,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=448b38f9959837a3b8eeeee19aa6a85d, ASSIGN}, {pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure 
table=TestNs:TestTable, region=7319af31ce83708d5bbd35fc3fa17e8e, ASSIGN}] 2024-11-07T15:48:33,065 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=7319af31ce83708d5bbd35fc3fa17e8e, ASSIGN 2024-11-07T15:48:33,065 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=448b38f9959837a3b8eeeee19aa6a85d, ASSIGN 2024-11-07T15:48:33,068 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=7319af31ce83708d5bbd35fc3fa17e8e, ASSIGN; state=OFFLINE, location=c2682f91c1e2,37185,1730994506636; forceNewPlan=false, retain=false 2024-11-07T15:48:33,068 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=448b38f9959837a3b8eeeee19aa6a85d, ASSIGN; state=OFFLINE, location=c2682f91c1e2,34001,1730994506521; forceNewPlan=false, retain=false 2024-11-07T15:48:33,219 INFO [c2682f91c1e2:38893 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-07T15:48:33,220 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=18 updating hbase:meta row=448b38f9959837a3b8eeeee19aa6a85d, regionState=OPENING, regionLocation=c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:33,220 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=7319af31ce83708d5bbd35fc3fa17e8e, regionState=OPENING, regionLocation=c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:33,224 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=7319af31ce83708d5bbd35fc3fa17e8e, ASSIGN because future has completed 2024-11-07T15:48:33,224 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7319af31ce83708d5bbd35fc3fa17e8e, server=c2682f91c1e2,37185,1730994506636}] 2024-11-07T15:48:33,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=448b38f9959837a3b8eeeee19aa6a85d, ASSIGN because future has completed 2024-11-07T15:48:33,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=21, ppid=18, state=RUNNABLE, hasLock=false; OpenRegionProcedure 448b38f9959837a3b8eeeee19aa6a85d, server=c2682f91c1e2,34001,1730994506521}] 2024-11-07T15:48:33,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-11-07T15:48:33,383 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open 
TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. 2024-11-07T15:48:33,384 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 7319af31ce83708d5bbd35fc3fa17e8e, NAME => 'TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e.', STARTKEY => '1', ENDKEY => ''} 2024-11-07T15:48:33,384 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestTable 7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:33,384 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:33,384 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:33,384 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:33,385 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(132): Open TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. 2024-11-07T15:48:33,385 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7752): Opening region: {ENCODED => 448b38f9959837a3b8eeeee19aa6a85d, NAME => 'TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d.', STARTKEY => '', ENDKEY => '1'} 2024-11-07T15:48:33,385 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestTable 448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:33,385 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(898): Instantiated TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-07T15:48:33,386 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7794): checking encryption for 448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:33,386 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7797): checking classloading for 448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:33,386 INFO [StoreOpener-7319af31ce83708d5bbd35fc3fa17e8e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:33,387 INFO [StoreOpener-448b38f9959837a3b8eeeee19aa6a85d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family cf of region 448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:33,388 INFO [StoreOpener-7319af31ce83708d5bbd35fc3fa17e8e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7319af31ce83708d5bbd35fc3fa17e8e columnFamilyName cf 2024-11-07T15:48:33,389 DEBUG [StoreOpener-7319af31ce83708d5bbd35fc3fa17e8e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:33,389 INFO [StoreOpener-448b38f9959837a3b8eeeee19aa6a85d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 448b38f9959837a3b8eeeee19aa6a85d columnFamilyName cf 2024-11-07T15:48:33,389 INFO [StoreOpener-7319af31ce83708d5bbd35fc3fa17e8e-1 {}] regionserver.HStore(327): Store=7319af31ce83708d5bbd35fc3fa17e8e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T15:48:33,389 DEBUG [StoreOpener-448b38f9959837a3b8eeeee19aa6a85d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-07T15:48:33,389 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:33,390 INFO [StoreOpener-448b38f9959837a3b8eeeee19aa6a85d-1 {}] regionserver.HStore(327): Store=448b38f9959837a3b8eeeee19aa6a85d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-07T15:48:33,390 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1038): replaying wal for 448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:33,390 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:33,391 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, 
pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:33,391 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:33,391 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:33,392 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:33,392 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:33,392 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1048): stopping wal replay for 448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:33,392 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1060): Cleaning up temporary data for 448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:33,394 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:33,394 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1093): writing seq id for 448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:33,397 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/7319af31ce83708d5bbd35fc3fa17e8e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T15:48:33,397 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/448b38f9959837a3b8eeeee19aa6a85d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-07T15:48:33,398 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 7319af31ce83708d5bbd35fc3fa17e8e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68359539, jitterRate=0.018636509776115417}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T15:48:33,398 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:33,398 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1114): Opened 
448b38f9959837a3b8eeeee19aa6a85d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72887066, jitterRate=0.08610191941261292}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-07T15:48:33,398 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:33,399 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1006): Region open journal for 448b38f9959837a3b8eeeee19aa6a85d: Running coprocessor pre-open hook at 1730994513386Writing region info on filesystem at 1730994513386Initializing all the Stores at 1730994513387 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730994513387Cleaning up temporary data from old regions at 1730994513392 (+5 ms)Running coprocessor post-open hooks at 1730994513398 (+6 ms)Region opened successfully at 1730994513398 2024-11-07T15:48:33,399 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 7319af31ce83708d5bbd35fc3fa17e8e: Running coprocessor pre-open hook at 1730994513385Writing region info on filesystem at 1730994513385Initializing all the Stores at 1730994513386 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1730994513386Cleaning up temporary data from old regions at 1730994513392 (+6 ms)Running coprocessor post-open hooks at 1730994513398 (+6 ms)Region opened successfully at 1730994513398 2024-11-07T15:48:33,400 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., pid=20, masterSystemTime=1730994513378 2024-11-07T15:48:33,400 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2236): Post open deploy tasks for TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d., pid=21, masterSystemTime=1730994513380 2024-11-07T15:48:33,403 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2266): Finished post open deploy task for TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. 2024-11-07T15:48:33,403 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(153): Opened TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. 
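[editor's note] The entries above trace the two OpenRegionProcedure children (pid=20 and pid=21) bringing up the regions ['', '1') and ['1', '') of TestNs:TestTable, each with a single column family 'cf' (one version, no compression). The actual table setup is not shown in this part of the log; purely as an illustrative sketch under those assumptions, a pre-split table with that layout could be created through the standard HBase Admin API roughly like this (namespace, table, family name and the single split key '1' are inferred from the region boundaries logged above, not taken from the test source):

    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePreSplitTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Namespace and table name mirror the identifiers seen in the log; this is an
          // assumed setup, not the test's actual bootstrap code.
          admin.createNamespace(NamespaceDescriptor.create("TestNs").build());
          TableName tn = TableName.valueOf("TestNs", "TestTable");
          // One column family 'cf' with a single version, matching the store descriptor
          // recorded in the region open journal above.
          TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build());
          // A single split key of '1' yields the two regions seen in the log:
          // ['', '1') and ['1', '').
          byte[][] splits = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(tdb.build(), splits);
        }
      }
    }

With two regions, the ASSIGN subprocedures that follow can place them on different region servers (c2682f91c1e2,34001 and c2682f91c1e2,37185), which is what the subsequent hbase:meta updates record.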
2024-11-07T15:48:33,404 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=18 updating hbase:meta row=448b38f9959837a3b8eeeee19aa6a85d, regionState=OPEN, openSeqNum=2, regionLocation=c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:33,404 DEBUG [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. 2024-11-07T15:48:33,404 INFO [RS_OPEN_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. 2024-11-07T15:48:33,405 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=7319af31ce83708d5bbd35fc3fa17e8e, regionState=OPEN, openSeqNum=2, regionLocation=c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:33,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=18, state=RUNNABLE, hasLock=false; OpenRegionProcedure 448b38f9959837a3b8eeeee19aa6a85d, server=c2682f91c1e2,34001,1730994506521 because future has completed 2024-11-07T15:48:33,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7319af31ce83708d5bbd35fc3fa17e8e, server=c2682f91c1e2,37185,1730994506636 because future has completed 2024-11-07T15:48:33,412 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=18 2024-11-07T15:48:33,412 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=18, state=SUCCESS, hasLock=false; OpenRegionProcedure 448b38f9959837a3b8eeeee19aa6a85d, server=c2682f91c1e2,34001,1730994506521 in 183 msec 2024-11-07T15:48:33,413 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-11-07T15:48:33,413 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 7319af31ce83708d5bbd35fc3fa17e8e, server=c2682f91c1e2,37185,1730994506636 in 185 msec 2024-11-07T15:48:33,414 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, ppid=17, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=448b38f9959837a3b8eeeee19aa6a85d, ASSIGN in 349 msec 2024-11-07T15:48:33,417 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=17 2024-11-07T15:48:33,417 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=17, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=7319af31ce83708d5bbd35fc3fa17e8e, ASSIGN in 350 msec 2024-11-07T15:48:33,418 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-07T15:48:33,418 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994513418"}]},"ts":"1730994513418"} 2024-11-07T15:48:33,420 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=ENABLED in 
hbase:meta 2024-11-07T15:48:33,421 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_POST_OPERATION 2024-11-07T15:48:33,424 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestNs:TestTable in 429 msec 2024-11-07T15:48:33,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-11-07T15:48:33,770 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: TestNs:TestTable completed 2024-11-07T15:48:33,770 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestNs:TestTable get assigned. Timeout = 60000ms 2024-11-07T15:48:33,770 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:33,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestNs:TestTable assigned to meta. Checking AM states. 2024-11-07T15:48:33,775 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:33,775 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestNs:TestTable assigned. 2024-11-07T15:48:33,775 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:33,777 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestNs:TestTable,, stopping at row=TestNs:TestTable ,, for max=2147483647 with caching=100 2024-11-07T15:48:33,782 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestNs:TestTable,, stopping at row=TestNs:TestTable ,, for max=2147483647 with caching=100 2024-11-07T15:48:33,807 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserTableClusterScopeQuota Thread=302, OpenFileDescriptor=547, MaxFileDescriptor=1048576, SystemLoadAverage=222, ProcessCount=11, AvailableMemoryMB=6695 2024-11-07T15:48:33,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='u.jenkins', locateType=CURRENT is [region=hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1., hostname=c2682f91c1e2,37185,1730994506636, seqNum=2] 2024-11-07T15:48:33,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-07T15:48:33,823 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47223, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-07T15:48:34,084 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:34,084 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:34,085 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1730998113833 bypass), TestNs=QuotaState(ts=1730998113833 bypass)} 2024-11-07T15:48:34,085 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): 
{TestNs:TestTable=QuotaState(ts=1730998113833 bypass), TestQuotaAdmin2=QuotaState(ts=1730998113833 bypass), TestQuotaAdmin1=QuotaState(ts=1730998113833 bypass)} 2024-11-07T15:48:34,085 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1730998113833 [ TestNs:TestTable ])} 2024-11-07T15:48:34,085 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1730998113833 bypass)} 2024-11-07T15:48:34,335 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:34,336 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:34,336 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1730998113833 bypass), TestNs=QuotaState(ts=1730998113833 bypass)} 2024-11-07T15:48:34,336 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1730998113833 bypass), TestNs:TestTable=QuotaState(ts=1730998113833 bypass)} 2024-11-07T15:48:34,336 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1730998113833 [ TestNs:TestTable ])} 2024-11-07T15:48:34,336 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1730998113833 bypass)} 2024-11-07T15:48:34,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-11-07T15:48:34,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Get size: 118 connection: 172.17.0.2:35390 deadline: 1730994524354, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-11-07T15:48:34,364 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 , the old value is region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:34,365 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:34,365 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 because the exception is null or not the one we care about 2024-11-07T15:48:34,365 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:34,367 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=10 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-07T15:48:34.365Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:224) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:34,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-11-07T15:48:34,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Get size: 117 connection: 172.17.0.2:35390 deadline: 1730994524369, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-11-07T15:48:34,372 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 , the old value is region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:34,372 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:34,372 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 because the exception is null or not the one we care about 2024-11-07T15:48:34,372 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:34,373 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=0 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-07T15:48:34.372Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:224) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:34,613 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-07T15:48:34,636 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:34,636 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:34,636 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731001713833 bypass), TestNs=QuotaState(ts=1731001713833 bypass)} 2024-11-07T15:48:34,636 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731001713833 bypass), TestQuotaAdmin2=QuotaState(ts=1731001713833 bypass), TestQuotaAdmin1=QuotaState(ts=1731001713833 bypass)} 2024-11-07T15:48:34,636 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731001713833 bypass)} 2024-11-07T15:48:34,636 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731001713833 bypass)} 2024-11-07T15:48:34,654 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-07T15:48:34,656 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestNs:TestTable' 2024-11-07T15:48:34,656 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin2' 2024-11-07T15:48:34,657 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin1' 2024-11-07T15:48:34,658 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin0' 2024-11-07T15:48:34,658 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): 
Creating new MetricsTableSourceImpl for table 'hbase:quota' 2024-11-07T15:48:34,887 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:34,887 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:34,887 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731001713833 bypass), TestNs=QuotaState(ts=1731001713833 bypass)} 2024-11-07T15:48:34,887 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731001713833 bypass), TestNs:TestTable=QuotaState(ts=1731001713833 bypass)} 2024-11-07T15:48:34,887 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731001713833 bypass)} 2024-11-07T15:48:34,887 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731001713833 bypass)} 2024-11-07T15:48:34,900 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserTableClusterScopeQuota Thread=302 (was 302), OpenFileDescriptor=549 (was 547) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=204 (was 222), ProcessCount=11 (was 11), AvailableMemoryMB=6681 (was 6695) 2024-11-07T15:48:34,909 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserNamespaceClusterScopeQuota Thread=302, OpenFileDescriptor=549, MaxFileDescriptor=1048576, SystemLoadAverage=204, ProcessCount=11, AvailableMemoryMB=6681 2024-11-07T15:48:35,172 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:35,172 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-11-07T15:48:35,423 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:35,423 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731005313933 bypass), TestNs=QuotaState(ts=1731005313933 bypass)} 2024-11-07T15:48:35,423 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731005313933 bypass), TestQuotaAdmin2=QuotaState(ts=1731005313933 bypass), TestQuotaAdmin1=QuotaState(ts=1731005313933 bypass)} 2024-11-07T15:48:35,423 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731005313933 [ default ])} 2024-11-07T15:48:35,423 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731005313933 bypass)} 2024-11-07T15:48:35,674 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:35,674 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-11-07T15:48:35,925 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:35,925 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731005314033 bypass), TestNs=QuotaState(ts=1731005314033 bypass)} 2024-11-07T15:48:35,925 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731005314033 bypass), TestNs:TestTable=QuotaState(ts=1731005314033 bypass)} 2024-11-07T15:48:35,925 DEBUG 
[Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731005314033 [ default ])} 2024-11-07T15:48:35,925 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731005314033 bypass)} 2024-11-07T15:48:35,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 12sec, 0ms 2024-11-07T15:48:35,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:35390 deadline: 1730994525932, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms 2024-11-07T15:48:35,934 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:35,934 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at 
org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:35,934 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 because the exception is null or not the one we care about 2024-11-07T15:48:35,934 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 12000000000ns which would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:35,935 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=5 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-07T15:48:35.934Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:199) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:35,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 10sec, 0ms 2024-11-07T15:48:35,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:35390 deadline: 1730994525951, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms 2024-11-07T15:48:35,953 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:35,954 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:35,954 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 because the exception is null or not the one we care about 2024-11-07T15:48:35,954 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:35,954 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-07T15:48:35.954Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:200) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:36,211 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:36,211 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:36,211 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731008914033 bypass), TestNs=QuotaState(ts=1731008914033 bypass)} 2024-11-07T15:48:36,211 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731008914033 bypass), TestQuotaAdmin2=QuotaState(ts=1731008914033 bypass), TestQuotaAdmin1=QuotaState(ts=1731008914033 bypass)} 2024-11-07T15:48:36,211 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731008914033 bypass)} 2024-11-07T15:48:36,211 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731008914033 bypass)} 2024-11-07T15:48:36,217 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-07T15:48:36,217 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-07T15:48:36,219 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin0 2024-11-07T15:48:36,219 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin0 Metrics about Tables on a single HBase RegionServer 2024-11-07T15:48:36,220 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin2 2024-11-07T15:48:36,220 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin2 Metrics about Tables on a single HBase RegionServer 2024-11-07T15:48:36,221 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_quota 2024-11-07T15:48:36,221 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_quota Metrics about Tables on a single HBase RegionServer 2024-11-07T15:48:36,222 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-07T15:48:36,222 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-07T15:48:36,224 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_TestNs_table_TestTable 2024-11-07T15:48:36,224 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_TestNs_table_TestTable Metrics about Tables on a single HBase RegionServer 2024-11-07T15:48:36,225 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.quotas.MasterQuotasObserver 2024-11-07T15:48:36,225 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.quotas.MasterQuotasObserver Metrics about HBase MasterObservers 2024-11-07T15:48:36,225 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T15:48:36,225 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-07T15:48:36,226 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin1 2024-11-07T15:48:36,226 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin1 Metrics about Tables on a single HBase RegionServer 2024-11-07T15:48:36,462 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:36,462 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:36,462 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731008914033 bypass), TestNs=QuotaState(ts=1731008914033 bypass)} 2024-11-07T15:48:36,462 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731008914033 bypass), TestNs:TestTable=QuotaState(ts=1731008914033 bypass)} 2024-11-07T15:48:36,462 DEBUG [Time-limited test {}] 
quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731008914033 bypass)} 2024-11-07T15:48:36,462 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731008914033 bypass)} 2024-11-07T15:48:36,474 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserNamespaceClusterScopeQuota Thread=301 (was 302), OpenFileDescriptor=547 (was 549), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=204 (was 204), ProcessCount=11 (was 11), AvailableMemoryMB=6674 (was 6681) 2024-11-07T15:48:36,483 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserClusterScopeQuota Thread=301, OpenFileDescriptor=547, MaxFileDescriptor=1048576, SystemLoadAverage=204, ProcessCount=11, AvailableMemoryMB=6673 2024-11-07T15:48:36,745 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:36,746 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-11-07T15:48:36,996 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:36,996 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731012514133 bypass), TestNs=QuotaState(ts=1731012514133 bypass)} 2024-11-07T15:48:36,996 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731012514133 bypass), TestQuotaAdmin2=QuotaState(ts=1731012514133 bypass), TestQuotaAdmin1=QuotaState(ts=1731012514133 bypass)} 2024-11-07T15:48:36,996 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731012514133 global-limiter)} 2024-11-07T15:48:36,996 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731012514133 bypass)} 2024-11-07T15:48:37,247 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:37,247 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-11-07T15:48:37,498 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:37,498 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731012514233 bypass), TestNs=QuotaState(ts=1731012514233 bypass)} 2024-11-07T15:48:37,498 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731012514233 bypass), TestNs:TestTable=QuotaState(ts=1731012514233 bypass)} 2024-11-07T15:48:37,498 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731012514233 global-limiter)} 2024-11-07T15:48:37,498 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731012514233 bypass)} 2024-11-07T15:48:37,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 10sec, 0ms 2024-11-07T15:48:37,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:35390 deadline: 1730994527514, 
exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms 2024-11-07T15:48:37,516 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:37,516 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:37,516 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 because the exception is null or not the one we care about 2024-11-07T15:48:37,516 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:37,517 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-07T15:48:37.516Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserClusterScopeQuota(TestClusterScopeQuotaThrottle.java:178) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:37,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 20sec, 0ms 2024-11-07T15:48:37,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:35390 deadline: 1730994527523, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms 2024-11-07T15:48:37,525 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:37,525 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:37,525 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 because the exception is null or not the one we care about 2024-11-07T15:48:37,525 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 20000000000ns which would exceed the timeout. We should throw instead. 
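The pause-manager entry above records the client-side choice when a throttle's suggested wait (20000000000 ns, i.e. 20 s) no longer fits inside the remaining operation timeout: instead of sleeping, the call fails fast with the RpcThrottlingException. What follows is a minimal sketch of that decision, with a hypothetical remaining-timeout value and method name; it is a simplification for illustration, not the HBaseServerExceptionPauseManager source.

import java.util.OptionalLong;
import java.util.concurrent.TimeUnit;

public class ThrottleBackoffSketch {
  // Returns the pause to take in nanoseconds, or empty when the caller should
  // throw because the server-suggested wait would overrun the remaining timeout.
  static OptionalLong pauseNsForThrottle(long suggestedWaitMs, long remainingTimeoutNs) {
    long suggestedWaitNs = TimeUnit.MILLISECONDS.toNanos(suggestedWaitMs);
    if (suggestedWaitNs > remainingTimeoutNs) {
      return OptionalLong.empty(); // "which would exceed the timeout. We should throw instead."
    }
    return OptionalLong.of(suggestedWaitNs);
  }

  public static void main(String[] args) {
    long suggestedWaitMs = 20_000L;                        // "wait 20sec" suggested by the server
    long remainingTimeoutNs = TimeUnit.SECONDS.toNanos(5); // hypothetical remaining budget
    System.out.println(pauseNsForThrottle(suggestedWaitMs, remainingTimeoutNs).isPresent()
        ? "pause, then retry"
        : "suggested pause exceeds the timeout; rethrow the RpcThrottlingException");
  }
}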
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:37,526 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=3 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-07T15:48:37.525Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserClusterScopeQuota(TestClusterScopeQuotaThrottle.java:179) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 20sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:37,782 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:37,783 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:37,783 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731016114233 bypass), TestNs=QuotaState(ts=1731016114233 bypass)} 2024-11-07T15:48:37,783 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731016114233 bypass), TestQuotaAdmin2=QuotaState(ts=1731016114233 bypass), TestQuotaAdmin1=QuotaState(ts=1731016114233 bypass)} 2024-11-07T15:48:37,783 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731016114233 bypass)} 2024-11-07T15:48:37,783 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731016114233 bypass)} 2024-11-07T15:48:38,033 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:38,033 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:38,033 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731016114233 bypass), TestNs=QuotaState(ts=1731016114233 bypass)} 2024-11-07T15:48:38,033 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731016114233 bypass), TestNs:TestTable=QuotaState(ts=1731016114233 bypass)} 2024-11-07T15:48:38,033 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731016114233 bypass)} 2024-11-07T15:48:38,033 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731016114233 bypass)} 2024-11-07T15:48:38,044 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserClusterScopeQuota Thread=300 (was 301), 
OpenFileDescriptor=546 (was 547), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=204 (was 204), ProcessCount=11 (was 11), AvailableMemoryMB=6661 (was 6673) 2024-11-07T15:48:38,053 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testTableClusterScopeQuota Thread=300, OpenFileDescriptor=546, MaxFileDescriptor=1048576, SystemLoadAverage=204, ProcessCount=11, AvailableMemoryMB=6661 2024-11-07T15:48:38,311 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:38,311 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:38,311 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731019714233 bypass), TestNs=QuotaState(ts=1731019714233 bypass)} 2024-11-07T15:48:38,314 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731019714233 TimeBasedLimiter( readReqs=AverageIntervalRateLimiter(avail=10 limit=10 tunit=3600000))), TestQuotaAdmin2=QuotaState(ts=1731019714233 bypass), TestQuotaAdmin1=QuotaState(ts=1731019714233 bypass)} 2024-11-07T15:48:38,314 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-11-07T15:48:38,314 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731019714233 bypass)} 2024-11-07T15:48:38,564 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-07T15:48:38,564 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-11-07T15:48:38,564 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731019714233 bypass), TestNs=QuotaState(ts=1731019714233 bypass)} 2024-11-07T15:48:38,564 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731019714233 bypass), TestNs:TestTable=QuotaState(ts=1731019714233 TimeBasedLimiter( readReqs=AverageIntervalRateLimiter(avail=10 limit=10 tunit=3600000)))} 2024-11-07T15:48:38,564 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-11-07T15:48:38,564 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731019714233 bypass)} 2024-11-07T15:48:38,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-11-07T15:48:38,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Get size: 118 connection: 172.17.0.2:35390 deadline: 1730994528579, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-11-07T15:48:38,581 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 , the old value is region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at 
org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:38,582 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:38,582 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 because the exception is null or not the one we care about 2024-11-07T15:48:38,582 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
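The 6-minute wait in the entries above is consistent with the limiter state logged earlier (readReqs=AverageIntervalRateLimiter(avail=10 limit=10 tunit=3600000), i.e. 10 read requests per hour on TestNs:TestTable), just as the earlier 10-second write wait is consistent with roughly 6 writes per minute for the user. Below is a hedged sketch of how comparable throttles could be declared through the public Admin quota API; the test drives its quotas through ThrottleQuotaTestUtil, so this is an assumed equivalent rather than the test's own code, and the limits shown are read off the log.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class DeclareThrottleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // 10 read requests per hour on TestNs:TestTable -> one extra read is told to
      // wait 6 minutes, matching "number of read requests exceeded - wait 6mins".
      admin.setQuota(QuotaSettingsFactory.throttleTable(
          TableName.valueOf("TestNs:TestTable"), ThrottleType.READ_NUMBER, 10, TimeUnit.HOURS));
      // 6 write requests per minute for the user -> a rejected write is told to
      // wait 10 seconds, matching "number of write requests exceeded - wait 10sec".
      admin.setQuota(QuotaSettingsFactory.throttleUser(
          "jenkins", ThrottleType.WRITE_NUMBER, 6, TimeUnit.MINUTES));
    }
  }
}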
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-11-07T15:48:38,582 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=10 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-07T15:48:38.582Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?]
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:151) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-11-07T15:48:38,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms
2024-11-07T15:48:38,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Get size: 117 connection: 172.17.0.2:35390 deadline: 1730994528583, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms
2024-11-07T15:48:38,585 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 , the old value is region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-07T15:48:38,585 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-07T15:48:38,585 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 because the exception is null or not the one we care about
2024-11-07T15:48:38,585 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead.
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-11-07T15:48:38,586 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=0 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-07T15:48:38.585Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?]
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:151) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-11-07T15:48:38,844 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-07T15:48:38,844 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-07T15:48:38,844 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731023314233 bypass), TestNs=QuotaState(ts=1731023314233 bypass)}
2024-11-07T15:48:38,844 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731023314233 bypass), TestQuotaAdmin2=QuotaState(ts=1731023314233 bypass), TestQuotaAdmin1=QuotaState(ts=1731023314233 bypass)}
2024-11-07T15:48:38,844 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {}
2024-11-07T15:48:38,845 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731023314233 bypass)}
2024-11-07T15:48:39,095 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-07T15:48:39,095 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-07T15:48:39,095 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731023314233 bypass), TestNs=QuotaState(ts=1731023314233 bypass)}
2024-11-07T15:48:39,095 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731023314233 bypass), TestNs:TestTable=QuotaState(ts=1731023314233 bypass)}
2024-11-07T15:48:39,095 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731023314233 bypass)}
2024-11-07T15:48:39,095 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731023314233 bypass)}
2024-11-07T15:48:39,106 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testTableClusterScopeQuota Thread=300 (was 300), OpenFileDescriptor=545 (was 546), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=204 (was 204), ProcessCount=11 (was 11), AvailableMemoryMB=6659 (was 6661)
2024-11-07T15:48:39,115 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testNamespaceClusterScopeQuota Thread=300, OpenFileDescriptor=545, MaxFileDescriptor=1048576, SystemLoadAverage=204, ProcessCount=11, AvailableMemoryMB=6659
2024-11-07T15:48:39,376 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-07T15:48:39,377 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-07T15:48:39,377 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731026914233 TimeBasedLimiter( writeReqs=AverageIntervalRateLimiter(avail=5 limit=5 tunit=60000) readReqs=AverageIntervalRateLimiter(avail=6 limit=6 tunit=60000))), TestNs=QuotaState(ts=1731026914233 bypass)}
2024-11-07T15:48:39,377 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731026914233 bypass), TestQuotaAdmin2=QuotaState(ts=1731026914233 bypass), TestQuotaAdmin1=QuotaState(ts=1731026914233 bypass)}
2024-11-07T15:48:39,377 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {}
2024-11-07T15:48:39,377 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731026914233 bypass)}
2024-11-07T15:48:39,627 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-07T15:48:39,627 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-07T15:48:39,627 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731026914233 TimeBasedLimiter( writeReqs=AverageIntervalRateLimiter(avail=5 limit=5 tunit=60000) readReqs=AverageIntervalRateLimiter(avail=6 limit=6 tunit=60000))), TestNs=QuotaState(ts=1731026914233 bypass)}
2024-11-07T15:48:39,628 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731026914233 bypass), TestNs:TestTable=QuotaState(ts=1731026914233 bypass)}
2024-11-07T15:48:39,628 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {}
2024-11-07T15:48:39,628 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731026914233 bypass)}
2024-11-07T15:48:39,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 12sec, 0ms
2024-11-07T15:48:39,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:35390 deadline: 1730994529641, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms
2024-11-07T15:48:39,643 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-07T15:48:39,643 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-07T15:48:39,643 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 because the exception is null or not the one we care about
2024-11-07T15:48:39,644 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 12000000000ns which would exceed the timeout. We should throw instead.
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:39,644 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=5 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-07T15:48:39.643Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:128) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:39,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 10sec, 0ms 2024-11-07T15:48:39,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37185 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Get size: 116 connection: 172.17.0.2:35390 deadline: 1730994529655, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms 2024-11-07T15:48:39,657 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:39,657 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-07T15:48:39,657 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., hostname=c2682f91c1e2,37185,1730994506636, seqNum=-1 because the exception is null or not the one we care about 2024-11-07T15:48:39,657 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-11-07T15:48:39,658 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-11-07T15:48:39.657Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:129) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more
2024-11-07T15:48:39,916 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-07T15:48:39,916 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-07T15:48:39,916 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731030514233 bypass), TestNs=QuotaState(ts=1731030514233 bypass)}
2024-11-07T15:48:39,916 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1731030514233 bypass), TestQuotaAdmin2=QuotaState(ts=1731030514233 bypass), TestQuotaAdmin1=QuotaState(ts=1731030514233 bypass)}
2024-11-07T15:48:39,916 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {}
2024-11-07T15:48:39,916 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731030514233 bypass)}
2024-11-07T15:48:40,167 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-07T15:48:40,167 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-11-07T15:48:40,167 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1731030514233 bypass), TestNs=QuotaState(ts=1731030514233 bypass)}
2024-11-07T15:48:40,167 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1731030514233 bypass), TestNs:TestTable=QuotaState(ts=1731030514233 bypass)}
2024-11-07T15:48:40,167 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1731030514233 bypass)}
2024-11-07T15:48:40,167 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1731030514233 bypass)}
2024-11-07T15:48:40,177 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testNamespaceClusterScopeQuota Thread=300 (was 300), OpenFileDescriptor=545 (was 545), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=188 (was 204), ProcessCount=11 (was 11), AvailableMemoryMB=6650 (was 6659)
MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=188 (was 204), ProcessCount=11 (was 11), AvailableMemoryMB=6650 (was 6659) 2024-11-07T15:48:40,182 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin0 2024-11-07T15:48:40,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=22, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin0 2024-11-07T15:48:40,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-11-07T15:48:40,191 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994520191"}]},"ts":"1730994520191"} 2024-11-07T15:48:40,194 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=DISABLING in hbase:meta 2024-11-07T15:48:40,194 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin0 to state=DISABLING 2024-11-07T15:48:40,196 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin0}] 2024-11-07T15:48:40,201 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=79945045cac83aa4484718cd7ff8f1c3, UNASSIGN}] 2024-11-07T15:48:40,203 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=79945045cac83aa4484718cd7ff8f1c3, UNASSIGN 2024-11-07T15:48:40,205 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=79945045cac83aa4484718cd7ff8f1c3, regionState=CLOSING, regionLocation=c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:40,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=79945045cac83aa4484718cd7ff8f1c3, UNASSIGN because future has completed 2024-11-07T15:48:40,207 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T15:48:40,207 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE, hasLock=false; CloseRegionProcedure 79945045cac83aa4484718cd7ff8f1c3, server=c2682f91c1e2,37185,1730994506636}] 2024-11-07T15:48:40,364 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(122): Close 79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:40,364 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-07T15:48:40,365 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1722): Closing 
79945045cac83aa4484718cd7ff8f1c3, disabling compactions & flushes 2024-11-07T15:48:40,365 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1755): Closing region TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. 2024-11-07T15:48:40,365 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. 2024-11-07T15:48:40,365 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. after waiting 0 ms 2024-11-07T15:48:40,365 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. 2024-11-07T15:48:40,369 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(2902): Flushing 79945045cac83aa4484718cd7ff8f1c3 1/1 column families, dataSize=578 B heapSize=2.11 KB 2024-11-07T15:48:40,427 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/.tmp/cf/275149d3d06943b181aa34fe339efab9 is 38, key is row-0/cf:q/1730994519630/Put/seqid=0 2024-11-07T15:48:40,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741847_1023 (size=4967) 2024-11-07T15:48:40,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741847_1023 (size=4967) 2024-11-07T15:48:40,443 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=578 B at sequenceid=21 (bloomFilter=false), to=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/.tmp/cf/275149d3d06943b181aa34fe339efab9 2024-11-07T15:48:40,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-11-07T15:48:40,492 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/.tmp/cf/275149d3d06943b181aa34fe339efab9 as hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/cf/275149d3d06943b181aa34fe339efab9 2024-11-07T15:48:40,503 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/cf/275149d3d06943b181aa34fe339efab9, entries=6, sequenceid=21, filesize=4.9 K 
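The RpcThrottlingException traces at the top of this excerpt show the region server's quota check (RegionServerRpcQuotaManager / TimeBasedLimiter) rejecting a Get once the read-request limit is exhausted, and the client surfacing the rejection through AsyncRpcRetryingCaller with the hint "wait 10sec, 0ms". Below is a minimal client-side sketch of handling such a rejection by backing off for the advertised interval. It is an illustration only: it assumes the exception reaches the caller directly (in practice it may arrive wrapped in a retries-exhausted exception), and it assumes getWaitInterval() returns the remaining wait in milliseconds; table and row names are borrowed from this test run.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.quotas.RpcThrottlingException;
import org.apache.hadoop.hbase.util.Bytes;

public class ThrottledGetExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestQuotaAdmin0"))) {
      Get get = new Get(Bytes.toBytes("row-0"));
      try {
        table.get(get);
      } catch (IOException e) {
        // The server rejects the call once the read-number quota is spent;
        // the exception carries how long the caller should back off
        // (assumed here to be exposed via getWaitInterval(), in milliseconds).
        if (e instanceof RpcThrottlingException) {
          long waitMs = ((RpcThrottlingException) e).getWaitInterval();
          Thread.sleep(waitMs);   // back off for the advertised interval
          table.get(get);         // then retry once
        } else {
          throw e;
        }
      }
    }
  }
}
```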
2024-11-07T15:48:40,510 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(3140): Finished flush of dataSize ~578 B/578, heapSize ~2.09 KB/2144, currentSize=0 B/0 for 79945045cac83aa4484718cd7ff8f1c3 in 140ms, sequenceid=21, compaction requested=false 2024-11-07T15:48:40,517 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-07T15:48:40,520 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1973): Closed TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. 2024-11-07T15:48:40,521 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1676): Region close journal for 79945045cac83aa4484718cd7ff8f1c3: Waiting for close lock at 1730994520365Running coprocessor pre-close hooks at 1730994520365Disabling compacts and flushes for region at 1730994520365Disabling writes for close at 1730994520365Obtaining lock to block concurrent updates at 1730994520369 (+4 ms)Preparing flush snapshotting stores in 79945045cac83aa4484718cd7ff8f1c3 at 1730994520369Finished memstore snapshotting TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3., syncing WAL and waiting on mvcc, flushsize=dataSize=578, getHeapSize=2144, getOffHeapSize=0, getCellsCount=17 at 1730994520377 (+8 ms)Flushing stores of TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3. at 1730994520378 (+1 ms)Flushing 79945045cac83aa4484718cd7ff8f1c3/cf: creating writer at 1730994520382 (+4 ms)Flushing 79945045cac83aa4484718cd7ff8f1c3/cf: appending metadata at 1730994520420 (+38 ms)Flushing 79945045cac83aa4484718cd7ff8f1c3/cf: closing flushed file at 1730994520423 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19e95a56: reopening flushed file at 1730994520490 (+67 ms)Finished flush of dataSize ~578 B/578, heapSize ~2.09 KB/2144, currentSize=0 B/0 for 79945045cac83aa4484718cd7ff8f1c3 in 140ms, sequenceid=21, compaction requested=false at 1730994520510 (+20 ms)Writing region close event to WAL at 1730994520512 (+2 ms)Running coprocessor post-close hooks at 1730994520518 (+6 ms)Closed at 1730994520520 (+2 ms) 2024-11-07T15:48:40,524 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(157): Closed 79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:40,525 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=79945045cac83aa4484718cd7ff8f1c3, regionState=CLOSED 2024-11-07T15:48:40,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=24, state=RUNNABLE, hasLock=false; CloseRegionProcedure 79945045cac83aa4484718cd7ff8f1c3, server=c2682f91c1e2,37185,1730994506636 because future has completed 2024-11-07T15:48:40,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=24 2024-11-07T15:48:40,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=24, state=SUCCESS, hasLock=false; CloseRegionProcedure 79945045cac83aa4484718cd7ff8f1c3, server=c2682f91c1e2,37185,1730994506636 in 
323 msec 2024-11-07T15:48:40,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=24, resume processing ppid=23 2024-11-07T15:48:40,536 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=23, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=79945045cac83aa4484718cd7ff8f1c3, UNASSIGN in 332 msec 2024-11-07T15:48:40,540 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-11-07T15:48:40,540 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin0 in 341 msec 2024-11-07T15:48:40,542 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994520542"}]},"ts":"1730994520542"} 2024-11-07T15:48:40,545 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=DISABLED in hbase:meta 2024-11-07T15:48:40,545 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin0 to state=DISABLED 2024-11-07T15:48:40,548 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin0 in 362 msec 2024-11-07T15:48:40,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-11-07T15:48:40,960 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin0 completed 2024-11-07T15:48:40,965 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin0 2024-11-07T15:48:40,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=26, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-07T15:48:40,971 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=26, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-07T15:48:40,973 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=26, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-07T15:48:40,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=26 2024-11-07T15:48:40,979 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:40,984 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/cf, FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/recovered.edits] 2024-11-07T15:48:40,993 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/cf/275149d3d06943b181aa34fe339efab9 to hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/archive/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/cf/275149d3d06943b181aa34fe339efab9 2024-11-07T15:48:40,999 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/recovered.edits/24.seqid to hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/archive/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3/recovered.edits/24.seqid 2024-11-07T15:48:41,000 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin0/79945045cac83aa4484718cd7ff8f1c3 2024-11-07T15:48:41,000 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin0 regions 2024-11-07T15:48:41,007 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=26, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-07T15:48:41,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34001 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-07T15:48:41,017 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin0 from hbase:meta 2024-11-07T15:48:41,021 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin0' descriptor. 2024-11-07T15:48:41,023 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=26, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-07T15:48:41,024 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin0' from region states. 2024-11-07T15:48:41,024 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730994521024"}]},"ts":"9223372036854775807"} 2024-11-07T15:48:41,027 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-07T15:48:41,027 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 79945045cac83aa4484718cd7ff8f1c3, NAME => 'TestQuotaAdmin0,,1730994510294.79945045cac83aa4484718cd7ff8f1c3.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T15:48:41,027 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin0' as deleted. 
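The pid=22 and pid=26 procedures above (DisableTableProcedure, then DeleteTableProcedure with the region flush, close, and HFile archiving in between) are what the master runs when a client disables and then deletes a table. A minimal sketch of the corresponding client side, using the synchronous Admin API and the table name from this run:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestQuotaAdmin0");
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          // DisableTableProcedure: close the regions and mark DISABLED in hbase:meta
          admin.disableTable(tn);
        }
        // DeleteTableProcedure: archive the region files, remove meta rows and the descriptor
        admin.deleteTable(tn);
      }
    }
  }
}
```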
2024-11-07T15:48:41,028 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730994521028"}]},"ts":"9223372036854775807"} 2024-11-07T15:48:41,031 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin0 state from META 2024-11-07T15:48:41,032 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=26, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-11-07T15:48:41,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin0 in 66 msec 2024-11-07T15:48:41,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=26 2024-11-07T15:48:41,241 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin0 2024-11-07T15:48:41,241 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin0 completed 2024-11-07T15:48:41,242 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin1 2024-11-07T15:48:41,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=27, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin1 2024-11-07T15:48:41,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=27 2024-11-07T15:48:41,247 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994521247"}]},"ts":"1730994521247"} 2024-11-07T15:48:41,249 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=DISABLING in hbase:meta 2024-11-07T15:48:41,249 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin1 to state=DISABLING 2024-11-07T15:48:41,250 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin1}] 2024-11-07T15:48:41,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=693706afa3a03665aa040eecbb09fb84, UNASSIGN}] 2024-11-07T15:48:41,253 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=693706afa3a03665aa040eecbb09fb84, UNASSIGN 2024-11-07T15:48:41,254 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=29 updating hbase:meta row=693706afa3a03665aa040eecbb09fb84, regionState=CLOSING, regionLocation=c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:41,256 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=693706afa3a03665aa040eecbb09fb84, UNASSIGN because future has completed 2024-11-07T15:48:41,257 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T15:48:41,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=30, ppid=29, state=RUNNABLE, hasLock=false; CloseRegionProcedure 693706afa3a03665aa040eecbb09fb84, server=c2682f91c1e2,34001,1730994506521}] 2024-11-07T15:48:41,410 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(122): Close 693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:41,410 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-07T15:48:41,411 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1722): Closing 693706afa3a03665aa040eecbb09fb84, disabling compactions & flushes 2024-11-07T15:48:41,411 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1755): Closing region TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. 2024-11-07T15:48:41,411 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. 2024-11-07T15:48:41,411 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. after waiting 0 ms 2024-11-07T15:48:41,411 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. 2024-11-07T15:48:41,418 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin1/693706afa3a03665aa040eecbb09fb84/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-07T15:48:41,419 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1973): Closed TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84. 
2024-11-07T15:48:41,420 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1676): Region close journal for 693706afa3a03665aa040eecbb09fb84: Waiting for close lock at 1730994521411Running coprocessor pre-close hooks at 1730994521411Disabling compacts and flushes for region at 1730994521411Disabling writes for close at 1730994521411Writing region close event to WAL at 1730994521412 (+1 ms)Running coprocessor post-close hooks at 1730994521419 (+7 ms)Closed at 1730994521419 2024-11-07T15:48:41,423 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(157): Closed 693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:41,424 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=29 updating hbase:meta row=693706afa3a03665aa040eecbb09fb84, regionState=CLOSED 2024-11-07T15:48:41,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=30, ppid=29, state=RUNNABLE, hasLock=false; CloseRegionProcedure 693706afa3a03665aa040eecbb09fb84, server=c2682f91c1e2,34001,1730994506521 because future has completed 2024-11-07T15:48:41,431 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=29 2024-11-07T15:48:41,431 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=29, state=SUCCESS, hasLock=false; CloseRegionProcedure 693706afa3a03665aa040eecbb09fb84, server=c2682f91c1e2,34001,1730994506521 in 171 msec 2024-11-07T15:48:41,433 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=28 2024-11-07T15:48:41,433 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=693706afa3a03665aa040eecbb09fb84, UNASSIGN in 179 msec 2024-11-07T15:48:41,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=27 2024-11-07T15:48:41,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=27, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin1 in 184 msec 2024-11-07T15:48:41,438 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994521438"}]},"ts":"1730994521438"} 2024-11-07T15:48:41,441 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=DISABLED in hbase:meta 2024-11-07T15:48:41,441 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin1 to state=DISABLED 2024-11-07T15:48:41,445 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin1 in 200 msec 2024-11-07T15:48:41,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=27 2024-11-07T15:48:41,500 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin1 completed 2024-11-07T15:48:41,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin1 2024-11-07T15:48:41,502 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-07T15:48:41,504 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=31, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-07T15:48:41,505 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=31, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-07T15:48:41,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-07T15:48:41,516 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin1/693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:41,520 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin1/693706afa3a03665aa040eecbb09fb84/cf, FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin1/693706afa3a03665aa040eecbb09fb84/recovered.edits] 2024-11-07T15:48:41,531 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin1/693706afa3a03665aa040eecbb09fb84/recovered.edits/4.seqid to hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/archive/data/default/TestQuotaAdmin1/693706afa3a03665aa040eecbb09fb84/recovered.edits/4.seqid 2024-11-07T15:48:41,532 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin1/693706afa3a03665aa040eecbb09fb84 2024-11-07T15:48:41,532 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin1 regions 2024-11-07T15:48:41,536 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=31, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-07T15:48:41,539 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin1 from hbase:meta 2024-11-07T15:48:41,543 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin1' descriptor. 2024-11-07T15:48:41,545 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=31, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-07T15:48:41,545 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin1' from region states. 
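The repeated "Checking to see if procedure is done pid=..." lines and the RawAsyncHBaseAdmin$TableProcedureBiConsumer "Operation: DISABLE/DELETE ... completed" lines come from the asynchronous admin client, which submits the procedure to the master and polls until it finishes. A rough equivalent with the AsyncAdmin API is sketched below; this assumes the async connection factory available in recent HBase client versions, and error handling is omitted.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncDropTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = conn.getAdmin();
      TableName tn = TableName.valueOf("TestQuotaAdmin1");
      // Each call returns a CompletableFuture that completes once the
      // corresponding master procedure (DISABLE / DELETE) has finished.
      admin.disableTable(tn).get();
      admin.deleteTable(tn).get();
    }
  }
}
```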
2024-11-07T15:48:41,546 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730994521545"}]},"ts":"9223372036854775807"} 2024-11-07T15:48:41,549 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-07T15:48:41,549 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 693706afa3a03665aa040eecbb09fb84, NAME => 'TestQuotaAdmin1,,1730994511126.693706afa3a03665aa040eecbb09fb84.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T15:48:41,549 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin1' as deleted. 2024-11-07T15:48:41,549 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730994521549"}]},"ts":"9223372036854775807"} 2024-11-07T15:48:41,553 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin1 state from META 2024-11-07T15:48:41,554 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=31, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-11-07T15:48:41,556 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin1 in 53 msec 2024-11-07T15:48:41,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-07T15:48:41,770 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin1 2024-11-07T15:48:41,770 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin1 completed 2024-11-07T15:48:41,771 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin2 2024-11-07T15:48:41,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin2 2024-11-07T15:48:41,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=32 2024-11-07T15:48:41,776 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994521776"}]},"ts":"1730994521776"} 2024-11-07T15:48:41,778 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=DISABLING in hbase:meta 2024-11-07T15:48:41,779 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin2 to state=DISABLING 2024-11-07T15:48:41,780 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin2}] 2024-11-07T15:48:41,782 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=TestQuotaAdmin2, region=b60425053afc90dabdcdd8551fa12ad1, UNASSIGN}] 2024-11-07T15:48:41,783 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=b60425053afc90dabdcdd8551fa12ad1, UNASSIGN 2024-11-07T15:48:41,784 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=b60425053afc90dabdcdd8551fa12ad1, regionState=CLOSING, regionLocation=c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:41,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=b60425053afc90dabdcdd8551fa12ad1, UNASSIGN because future has completed 2024-11-07T15:48:41,786 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T15:48:41,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure b60425053afc90dabdcdd8551fa12ad1, server=c2682f91c1e2,34001,1730994506521}] 2024-11-07T15:48:41,940 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:41,940 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-07T15:48:41,940 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing b60425053afc90dabdcdd8551fa12ad1, disabling compactions & flushes 2024-11-07T15:48:41,940 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. 2024-11-07T15:48:41,940 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. 2024-11-07T15:48:41,940 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. after waiting 0 ms 2024-11-07T15:48:41,940 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. 2024-11-07T15:48:41,946 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin2/b60425053afc90dabdcdd8551fa12ad1/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-07T15:48:41,946 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1. 
2024-11-07T15:48:41,947 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for b60425053afc90dabdcdd8551fa12ad1: Waiting for close lock at 1730994521940Running coprocessor pre-close hooks at 1730994521940Disabling compacts and flushes for region at 1730994521940Disabling writes for close at 1730994521940Writing region close event to WAL at 1730994521941 (+1 ms)Running coprocessor post-close hooks at 1730994521946 (+5 ms)Closed at 1730994521946 2024-11-07T15:48:41,949 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:41,950 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=b60425053afc90dabdcdd8551fa12ad1, regionState=CLOSED 2024-11-07T15:48:41,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure b60425053afc90dabdcdd8551fa12ad1, server=c2682f91c1e2,34001,1730994506521 because future has completed 2024-11-07T15:48:41,955 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-11-07T15:48:41,955 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure b60425053afc90dabdcdd8551fa12ad1, server=c2682f91c1e2,34001,1730994506521 in 167 msec 2024-11-07T15:48:41,957 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=33 2024-11-07T15:48:41,957 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=b60425053afc90dabdcdd8551fa12ad1, UNASSIGN in 173 msec 2024-11-07T15:48:41,960 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=33, resume processing ppid=32 2024-11-07T15:48:41,961 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin2 in 179 msec 2024-11-07T15:48:41,963 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994521962"}]},"ts":"1730994521962"} 2024-11-07T15:48:41,965 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=DISABLED in hbase:meta 2024-11-07T15:48:41,965 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin2 to state=DISABLED 2024-11-07T15:48:41,968 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin2 in 195 msec 2024-11-07T15:48:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=32 2024-11-07T15:48:42,030 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin2 completed 2024-11-07T15:48:42,031 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin2 2024-11-07T15:48:42,032 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-07T15:48:42,033 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-07T15:48:42,035 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-07T15:48:42,037 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin2/b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-07T15:48:42,040 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin2/b60425053afc90dabdcdd8551fa12ad1/cf, FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin2/b60425053afc90dabdcdd8551fa12ad1/recovered.edits] 2024-11-07T15:48:42,049 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin2/b60425053afc90dabdcdd8551fa12ad1/recovered.edits/4.seqid to hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/archive/data/default/TestQuotaAdmin2/b60425053afc90dabdcdd8551fa12ad1/recovered.edits/4.seqid 2024-11-07T15:48:42,049 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/default/TestQuotaAdmin2/b60425053afc90dabdcdd8551fa12ad1 2024-11-07T15:48:42,050 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin2 regions 2024-11-07T15:48:42,053 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-07T15:48:42,056 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin2 from hbase:meta 2024-11-07T15:48:42,059 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin2' descriptor. 2024-11-07T15:48:42,061 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-07T15:48:42,061 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin2' from region states. 
2024-11-07T15:48:42,061 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730994522061"}]},"ts":"9223372036854775807"} 2024-11-07T15:48:42,064 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-07T15:48:42,064 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b60425053afc90dabdcdd8551fa12ad1, NAME => 'TestQuotaAdmin2,,1730994511923.b60425053afc90dabdcdd8551fa12ad1.', STARTKEY => '', ENDKEY => ''}] 2024-11-07T15:48:42,064 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin2' as deleted. 2024-11-07T15:48:42,064 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730994522064"}]},"ts":"9223372036854775807"} 2024-11-07T15:48:42,066 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin2 state from META 2024-11-07T15:48:42,067 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-11-07T15:48:42,069 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin2 in 36 msec 2024-11-07T15:48:42,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-07T15:48:42,300 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin2 2024-11-07T15:48:42,301 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin2 completed 2024-11-07T15:48:42,301 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestNs:TestTable 2024-11-07T15:48:42,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestNs:TestTable 2024-11-07T15:48:42,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-07T15:48:42,307 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994522306"}]},"ts":"1730994522306"} 2024-11-07T15:48:42,309 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=DISABLING in hbase:meta 2024-11-07T15:48:42,310 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestNs:TestTable to state=DISABLING 2024-11-07T15:48:42,311 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestNs:TestTable}] 2024-11-07T15:48:42,313 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=TestNs:TestTable, region=448b38f9959837a3b8eeeee19aa6a85d, UNASSIGN}, {pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=7319af31ce83708d5bbd35fc3fa17e8e, UNASSIGN}] 2024-11-07T15:48:42,314 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=7319af31ce83708d5bbd35fc3fa17e8e, UNASSIGN 2024-11-07T15:48:42,315 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=448b38f9959837a3b8eeeee19aa6a85d, UNASSIGN 2024-11-07T15:48:42,316 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=7319af31ce83708d5bbd35fc3fa17e8e, regionState=CLOSING, regionLocation=c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:42,317 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=39 updating hbase:meta row=448b38f9959837a3b8eeeee19aa6a85d, regionState=CLOSING, regionLocation=c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:42,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=7319af31ce83708d5bbd35fc3fa17e8e, UNASSIGN because future has completed 2024-11-07T15:48:42,318 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T15:48:42,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7319af31ce83708d5bbd35fc3fa17e8e, server=c2682f91c1e2,37185,1730994506636}] 2024-11-07T15:48:42,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=448b38f9959837a3b8eeeee19aa6a85d, UNASSIGN because future has completed 2024-11-07T15:48:42,320 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-07T15:48:42,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=39, state=RUNNABLE, hasLock=false; CloseRegionProcedure 448b38f9959837a3b8eeeee19aa6a85d, server=c2682f91c1e2,34001,1730994506521}] 2024-11-07T15:48:42,472 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(122): Close 7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:42,472 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-07T15:48:42,472 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1722): Closing 7319af31ce83708d5bbd35fc3fa17e8e, disabling compactions & flushes 2024-11-07T15:48:42,472 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1755): 
Closing region TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. 2024-11-07T15:48:42,472 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. 2024-11-07T15:48:42,472 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. after waiting 0 ms 2024-11-07T15:48:42,472 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. 2024-11-07T15:48:42,474 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:42,474 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-07T15:48:42,474 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 448b38f9959837a3b8eeeee19aa6a85d, disabling compactions & flushes 2024-11-07T15:48:42,474 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. 2024-11-07T15:48:42,474 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. 2024-11-07T15:48:42,474 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. after waiting 0 ms 2024-11-07T15:48:42,474 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. 2024-11-07T15:48:42,478 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/7319af31ce83708d5bbd35fc3fa17e8e/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-07T15:48:42,479 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1973): Closed TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e. 
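This teardown belongs to TestClusterScopeQuotaThrottle#testNamespaceClusterScopeQuota, which throttles reads for the TestNs namespace and later removes the limit (the QuotaCache dumps earlier show the namespace entries in bypass after the test). A sketch of how such a namespace read throttle can be set and cleared through the Admin quota API follows; the limit and time unit are illustrative rather than the values used by the test, and cluster-scope variants of these factory methods take an additional scope argument in recent versions.

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class NamespaceThrottleExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Throttle reads for the TestNs namespace to 10 requests per minute (illustrative values).
      admin.setQuota(QuotaSettingsFactory.throttleNamespace(
          "TestNs", ThrottleType.READ_NUMBER, 10, TimeUnit.MINUTES));
      // ... run workload; over-limit calls fail with RpcThrottlingException ...
      // Remove the throttle again.
      admin.setQuota(QuotaSettingsFactory.unthrottleNamespace("TestNs"));
    }
  }
}
```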
2024-11-07T15:48:42,479 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1676): Region close journal for 7319af31ce83708d5bbd35fc3fa17e8e: Waiting for close lock at 1730994522472Running coprocessor pre-close hooks at 1730994522472Disabling compacts and flushes for region at 1730994522472Disabling writes for close at 1730994522472Writing region close event to WAL at 1730994522473 (+1 ms)Running coprocessor post-close hooks at 1730994522479 (+6 ms)Closed at 1730994522479 2024-11-07T15:48:42,482 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/448b38f9959837a3b8eeeee19aa6a85d/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-07T15:48:42,482 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(157): Closed 7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:42,483 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d. 2024-11-07T15:48:42,483 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=7319af31ce83708d5bbd35fc3fa17e8e, regionState=CLOSED 2024-11-07T15:48:42,483 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 448b38f9959837a3b8eeeee19aa6a85d: Waiting for close lock at 1730994522474Running coprocessor pre-close hooks at 1730994522474Disabling compacts and flushes for region at 1730994522474Disabling writes for close at 1730994522474Writing region close event to WAL at 1730994522475 (+1 ms)Running coprocessor post-close hooks at 1730994522482 (+7 ms)Closed at 1730994522483 (+1 ms) 2024-11-07T15:48:42,485 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:42,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7319af31ce83708d5bbd35fc3fa17e8e, server=c2682f91c1e2,37185,1730994506636 because future has completed 2024-11-07T15:48:42,486 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=39 updating hbase:meta row=448b38f9959837a3b8eeeee19aa6a85d, regionState=CLOSED 2024-11-07T15:48:42,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=39, state=RUNNABLE, hasLock=false; CloseRegionProcedure 448b38f9959837a3b8eeeee19aa6a85d, server=c2682f91c1e2,34001,1730994506521 because future has completed 2024-11-07T15:48:42,491 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=41, resume processing ppid=40 2024-11-07T15:48:42,491 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 7319af31ce83708d5bbd35fc3fa17e8e, server=c2682f91c1e2,37185,1730994506636 in 169 msec 2024-11-07T15:48:42,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=39 2024-11-07T15:48:42,492 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=39, state=SUCCESS, hasLock=false; CloseRegionProcedure 448b38f9959837a3b8eeeee19aa6a85d, server=c2682f91c1e2,34001,1730994506521 in 170 msec 2024-11-07T15:48:42,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=38, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=7319af31ce83708d5bbd35fc3fa17e8e, UNASSIGN in 178 msec 2024-11-07T15:48:42,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-11-07T15:48:42,495 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=448b38f9959837a3b8eeeee19aa6a85d, UNASSIGN in 179 msec 2024-11-07T15:48:42,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=38, resume processing ppid=37 2024-11-07T15:48:42,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=37, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestNs:TestTable in 185 msec 2024-11-07T15:48:42,500 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1730994522500"}]},"ts":"1730994522500"} 2024-11-07T15:48:42,502 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=DISABLED in hbase:meta 2024-11-07T15:48:42,502 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestNs:TestTable to state=DISABLED 2024-11-07T15:48:42,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestNs:TestTable in 201 msec 2024-11-07T15:48:42,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-07T15:48:42,561 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: TestNs:TestTable completed 2024-11-07T15:48:42,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestNs:TestTable 2024-11-07T15:48:42,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=43, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestNs:TestTable 2024-11-07T15:48:42,564 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=43, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-07T15:48:42,565 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=43, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-07T15:48:42,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=43 2024-11-07T15:48:42,570 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:42,570 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:42,573 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/7319af31ce83708d5bbd35fc3fa17e8e/cf, FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/7319af31ce83708d5bbd35fc3fa17e8e/recovered.edits] 2024-11-07T15:48:42,573 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/448b38f9959837a3b8eeeee19aa6a85d/cf, FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/448b38f9959837a3b8eeeee19aa6a85d/recovered.edits] 2024-11-07T15:48:42,580 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/448b38f9959837a3b8eeeee19aa6a85d/recovered.edits/4.seqid to hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/archive/data/TestNs/TestTable/448b38f9959837a3b8eeeee19aa6a85d/recovered.edits/4.seqid 2024-11-07T15:48:42,581 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/7319af31ce83708d5bbd35fc3fa17e8e/recovered.edits/4.seqid to hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/archive/data/TestNs/TestTable/7319af31ce83708d5bbd35fc3fa17e8e/recovered.edits/4.seqid 2024-11-07T15:48:42,581 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/448b38f9959837a3b8eeeee19aa6a85d 2024-11-07T15:48:42,581 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/TestNs/TestTable/7319af31ce83708d5bbd35fc3fa17e8e 2024-11-07T15:48:42,581 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestNs:TestTable regions 2024-11-07T15:48:42,584 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=43, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-07T15:48:42,587 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of TestNs:TestTable from hbase:meta 2024-11-07T15:48:42,590 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestNs:TestTable' descriptor. 2024-11-07T15:48:42,591 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=43, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-07T15:48:42,592 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestNs:TestTable' from region states. 
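Up to this point the log has recorded the client-driven cleanup of TestNs:TestTable: the DisableTableProcedure (pid=37) closes and unassigns both regions, and the DeleteTableProcedure (pid=43) archives the region directories and removes the table's rows from hbase:meta. On the client side this whole sequence is typically two Admin calls; the following is a minimal sketch assuming a running cluster and a fresh connection (class and variable names are illustrative, not taken from the test source):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestNs", "TestTable");
          if (admin.tableExists(tn)) {
            if (!admin.isTableDisabled(tn)) {
              // Blocks until the DisableTableProcedure (pid=37 above) has finished.
              admin.disableTable(tn);
            }
            // Starts the DeleteTableProcedure: region directories are archived and
            // the table's rows are removed from hbase:meta, as logged above.
            admin.deleteTable(tn);
          }
        }
      }
    }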
2024-11-07T15:48:42,592 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730994522592"}]},"ts":"9223372036854775807"} 2024-11-07T15:48:42,592 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1730994522592"}]},"ts":"9223372036854775807"} 2024-11-07T15:48:42,595 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-07T15:48:42,595 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 448b38f9959837a3b8eeeee19aa6a85d, NAME => 'TestNs:TestTable,,1730994512992.448b38f9959837a3b8eeeee19aa6a85d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 7319af31ce83708d5bbd35fc3fa17e8e, NAME => 'TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e.', STARTKEY => '1', ENDKEY => ''}] 2024-11-07T15:48:42,595 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestNs:TestTable' as deleted. 2024-11-07T15:48:42,595 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1730994522595"}]},"ts":"9223372036854775807"} 2024-11-07T15:48:42,597 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table TestNs:TestTable state from META 2024-11-07T15:48:42,598 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=43, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-11-07T15:48:42,600 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestNs:TestTable in 36 msec 2024-11-07T15:48:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=43 2024-11-07T15:48:42,831 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestNs:TestTable 2024-11-07T15:48:42,831 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: TestNs:TestTable completed 2024-11-07T15:48:42,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.HMaster$20(3601): Client=jenkins//172.17.0.2 delete TestNs 2024-11-07T15:48:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_NAMESPACE_PREPARE, hasLock=false; DeleteNamespaceProcedure, namespace=TestNs 2024-11-07T15:48:42,842 INFO [PEWorker-1 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_PREPARE, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-11-07T15:48:42,845 INFO [PEWorker-1 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_DELETE_FROM_NS_TABLE, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-11-07T15:48:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-07T15:48:42,849 INFO [PEWorker-1 {}] procedure.DeleteNamespaceProcedure(67): pid=44, 
state=RUNNABLE:DELETE_NAMESPACE_DELETE_DIRECTORIES, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-11-07T15:48:42,852 INFO [PEWorker-1 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-11-07T15:48:42,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteNamespaceProcedure, namespace=TestNs in 16 msec 2024-11-07T15:48:43,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38893 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-07T15:48:43,110 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$NamespaceProcedureBiConsumer(2745): Operation: DELETE_NAMESPACE, Namespace: TestNs completed 2024-11-07T15:48:43,110 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-07T15:48:43,110 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-07T15:48:43,110 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.tearDownAfterClass(TestClusterScopeQuotaThrottle.java:107) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T15:48:43,115 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T15:48:43,116 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T15:48:43,116 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-07T15:48:43,116 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-07T15:48:43,117 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=840105357, stopped=false 2024-11-07T15:48:43,117 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.quotas.MasterQuotasObserver 2024-11-07T15:48:43,117 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c2682f91c1e2,38893,1730994505717 2024-11-07T15:48:43,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T15:48:43,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T15:48:43,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T15:48:43,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-07T15:48:43,119 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-07T15:48:43,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T15:48:43,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-07T15:48:43,120 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
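The DELETE_NAMESPACE operation recorded a few entries above (pid=44) removes the now empty TestNs namespace. Client-side it is a single Admin call; a short sketch under the same assumptions as the previous example:

    // Imports as in the table-cleanup sketch above, plus java.io.IOException.
    static void dropNamespace(Connection conn) throws IOException {
      try (Admin admin = conn.getAdmin()) {
        // Fails if any table is still left in the namespace, which is why the log
        // shows TestNs:TestTable being deleted before this step.
        admin.deleteNamespace("TestNs");
      }
    }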
2024-11-07T15:48:43,120 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.tearDownAfterClass(TestClusterScopeQuotaThrottle.java:107) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T15:48:43,120 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T15:48:43,120 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T15:48:43,120 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c2682f91c1e2,34001,1730994506521' ***** 2024-11-07T15:48:43,120 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T15:48:43,120 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-07T15:48:43,121 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c2682f91c1e2,37185,1730994506636' ***** 2024-11-07T15:48:43,121 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 
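The two call stacks above show where this shutdown comes from: TestClusterScopeQuotaThrottle.tearDownAfterClass calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared connection and then asks the master to shut the cluster down. A typical JUnit 4 hook for that pattern is sketched below; the TEST_UTIL field name and the startMiniCluster options are assumptions, and the exact signatures vary a little between HBase versions:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycle {
      // Shared helper that starts and stops the in-process HBase/HDFS cluster.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        TEST_UTIL.startMiniCluster(2);   // two region servers, as in this log
      }

      @AfterClass
      public static void tearDownAfterClass() throws Exception {
        // Closes the shared connection and stops master, region servers and HDFS,
        // producing the "Shutting down minicluster" sequence seen above.
        TEST_UTIL.shutdownMiniCluster();
      }
    }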
2024-11-07T15:48:43,121 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T15:48:43,121 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-07T15:48:43,121 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-07T15:48:43,121 INFO [RS:1;c2682f91c1e2:37185 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T15:48:43,121 INFO [RS:0;c2682f91c1e2:34001 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-07T15:48:43,121 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T15:48:43,121 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-07T15:48:43,121 INFO [RS:1;c2682f91c1e2:37185 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T15:48:43,121 INFO [RS:0;c2682f91c1e2:34001 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-07T15:48:43,122 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(3091): Received CLOSE for 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:43,122 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(959): stopping server c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:43,122 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T15:48:43,122 INFO [RS:0;c2682f91c1e2:34001 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c2682f91c1e2:34001. 
2024-11-07T15:48:43,122 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(959): stopping server c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:43,122 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T15:48:43,122 DEBUG [RS:0;c2682f91c1e2:34001 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T15:48:43,122 INFO [RS:1;c2682f91c1e2:37185 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;c2682f91c1e2:37185. 
2024-11-07T15:48:43,122 DEBUG [RS:0;c2682f91c1e2:34001 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T15:48:43,122 DEBUG [RS:1;c2682f91c1e2:37185 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-07T15:48:43,122 DEBUG [RS:1;c2682f91c1e2:37185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T15:48:43,122 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T15:48:43,122 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T15:48:43,122 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-07T15:48:43,122 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-07T15:48:43,123 DEBUG [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(1325): Online Regions={72d83c7eae51146f5b941b52ec6566e1=hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1.} 2024-11-07T15:48:43,123 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-07T15:48:43,123 DEBUG [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(1351): Waiting on 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:43,123 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 72d83c7eae51146f5b941b52ec6566e1, disabling compactions & flushes 2024-11-07T15:48:43,123 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-07T15:48:43,123 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 2024-11-07T15:48:43,123 DEBUG [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-07T15:48:43,123 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 
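The "Flushing ... column families" entries here are memstore flushes forced by region close: both the hbase:quota region and hbase:meta write their remaining edits to HFiles before closing. Outside of close, the same flush can be requested explicitly through the Admin API; a one-method sketch (imports as in the table-cleanup example, plus java.io.IOException):

    // Flushes every region of the given table; here the quota system table.
    static void flushQuotaTable(Connection conn) throws IOException {
      try (Admin admin = conn.getAdmin()) {
        admin.flush(TableName.valueOf("hbase", "quota"));
      }
    }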
2024-11-07T15:48:43,123 DEBUG [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-07T15:48:43,123 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. after waiting 0 ms 2024-11-07T15:48:43,123 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-07T15:48:43,123 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 2024-11-07T15:48:43,123 INFO [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-07T15:48:43,123 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-07T15:48:43,123 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-07T15:48:43,123 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-07T15:48:43,123 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 72d83c7eae51146f5b941b52ec6566e1 2/2 column families, dataSize=726 B heapSize=2.44 KB 2024-11-07T15:48:43,123 INFO [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=13.33 KB heapSize=24.55 KB 2024-11-07T15:48:43,142 INFO [regionserver/c2682f91c1e2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T15:48:43,145 INFO [regionserver/c2682f91c1e2:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T15:48:43,154 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/.tmp/info/7fbdbe74bbd4408793be091ba7d53d25 is 135, key is hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1./info:regioninfo/1730994510229/Put/seqid=0 2024-11-07T15:48:43,155 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1/.tmp/q/9e10f9dae8eb4bddb632e5d77990c9ed is 44, key is u.jenkins/q:s.default:/1730994515959/DeleteColumn/seqid=0 2024-11-07T15:48:43,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741848_1024 (size=7362) 2024-11-07T15:48:43,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741848_1024 (size=7362) 2024-11-07T15:48:43,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to 
blk_1073741849_1025 (size=5302) 2024-11-07T15:48:43,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741849_1025 (size=5302) 2024-11-07T15:48:43,163 INFO [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.80 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/.tmp/info/7fbdbe74bbd4408793be091ba7d53d25 2024-11-07T15:48:43,190 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/.tmp/ns/219bd348661e4a9f94dba25d7b236bf8 is 92, key is TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e./ns:/1730994522584/DeleteFamily/seqid=0 2024-11-07T15:48:43,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741850_1026 (size=5710) 2024-11-07T15:48:43,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741850_1026 (size=5710) 2024-11-07T15:48:43,197 INFO [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=572 B at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/.tmp/ns/219bd348661e4a9f94dba25d7b236bf8 2024-11-07T15:48:43,221 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/.tmp/rep_barrier/c7b0b5d3d18245ba8f320e01e8870034 is 101, key is TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e./rep_barrier:/1730994522584/DeleteFamily/seqid=0 2024-11-07T15:48:43,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741851_1027 (size=5823) 2024-11-07T15:48:43,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741851_1027 (size=5823) 2024-11-07T15:48:43,228 INFO [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=515 B at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/.tmp/rep_barrier/c7b0b5d3d18245ba8f320e01e8870034 2024-11-07T15:48:43,251 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/.tmp/table/09ebea99b19c4da0ad7a64d872245c66 is 95, key is TestNs:TestTable,1,1730994512992.7319af31ce83708d5bbd35fc3fa17e8e./table:/1730994522584/DeleteFamily/seqid=0 2024-11-07T15:48:43,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741852_1028 (size=5966) 2024-11-07T15:48:43,258 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741852_1028 (size=5966) 2024-11-07T15:48:43,258 INFO [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.47 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/.tmp/table/09ebea99b19c4da0ad7a64d872245c66 2024-11-07T15:48:43,266 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/.tmp/info/7fbdbe74bbd4408793be091ba7d53d25 as hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/info/7fbdbe74bbd4408793be091ba7d53d25 2024-11-07T15:48:43,274 INFO [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/info/7fbdbe74bbd4408793be091ba7d53d25, entries=21, sequenceid=65, filesize=7.2 K 2024-11-07T15:48:43,276 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/.tmp/ns/219bd348661e4a9f94dba25d7b236bf8 as hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/ns/219bd348661e4a9f94dba25d7b236bf8 2024-11-07T15:48:43,284 INFO [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/ns/219bd348661e4a9f94dba25d7b236bf8, entries=8, sequenceid=65, filesize=5.6 K 2024-11-07T15:48:43,285 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/.tmp/rep_barrier/c7b0b5d3d18245ba8f320e01e8870034 as hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/rep_barrier/c7b0b5d3d18245ba8f320e01e8870034 2024-11-07T15:48:43,293 INFO [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/rep_barrier/c7b0b5d3d18245ba8f320e01e8870034, entries=6, sequenceid=65, filesize=5.7 K 2024-11-07T15:48:43,294 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/.tmp/table/09ebea99b19c4da0ad7a64d872245c66 as hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/table/09ebea99b19c4da0ad7a64d872245c66 2024-11-07T15:48:43,301 INFO [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/table/09ebea99b19c4da0ad7a64d872245c66, entries=12, sequenceid=65, filesize=5.8 K 2024-11-07T15:48:43,302 INFO [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~13.33 KB/13653, heapSize ~24.48 KB/25072, currentSize=0 B/0 for 1588230740 in 179ms, sequenceid=65, compaction requested=false 2024-11-07T15:48:43,308 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/meta/1588230740/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-11-07T15:48:43,308 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-07T15:48:43,309 INFO [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-07T15:48:43,309 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1730994523123Running coprocessor pre-close hooks at 1730994523123Disabling compacts and flushes for region at 1730994523123Disabling writes for close at 1730994523123Obtaining lock to block concurrent updates at 1730994523123Preparing flush snapshotting stores in 1588230740 at 1730994523123Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=13653, getHeapSize=25072, getOffHeapSize=0, getCellsCount=139 at 1730994523124 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1730994523124Flushing 1588230740/info: creating writer at 1730994523125 (+1 ms)Flushing 1588230740/info: appending metadata at 1730994523144 (+19 ms)Flushing 1588230740/info: closing flushed file at 1730994523144Flushing 1588230740/ns: creating writer at 1730994523172 (+28 ms)Flushing 1588230740/ns: appending metadata at 1730994523189 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1730994523189Flushing 1588230740/rep_barrier: creating writer at 1730994523204 (+15 ms)Flushing 1588230740/rep_barrier: appending metadata at 1730994523220 (+16 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1730994523220Flushing 1588230740/table: creating writer at 1730994523235 (+15 ms)Flushing 1588230740/table: appending metadata at 1730994523251 (+16 ms)Flushing 1588230740/table: closing flushed file at 1730994523251Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ffedead: reopening flushed file at 1730994523265 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6441ea1c: reopening flushed file at 1730994523275 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@707e5a1: reopening flushed file at 1730994523284 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69fa9bee: reopening flushed file at 1730994523293 (+9 ms)Finished flush of dataSize ~13.33 KB/13653, heapSize ~24.48 KB/25072, currentSize=0 B/0 for 1588230740 in 179ms, sequenceid=65, compaction requested=false at 1730994523302 (+9 ms)Writing region close event to WAL at 1730994523304 (+2 ms)Running coprocessor post-close hooks at 1730994523308 (+4 ms)Closed at 1730994523309 
(+1 ms) 2024-11-07T15:48:43,309 DEBUG [RS_CLOSE_META-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-07T15:48:43,323 DEBUG [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(1351): Waiting on 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:43,323 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(976): stopping server c2682f91c1e2,34001,1730994506521; all regions closed. 2024-11-07T15:48:43,323 DEBUG [RS:0;c2682f91c1e2:34001 {}] quotas.QuotaCache(112): Stopping QuotaRefresherChore chore. 2024-11-07T15:48:43,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741835_1011 (size=17505) 2024-11-07T15:48:43,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741835_1011 (size=17505) 2024-11-07T15:48:43,332 DEBUG [RS:0;c2682f91c1e2:34001 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/oldWALs 2024-11-07T15:48:43,332 INFO [RS:0;c2682f91c1e2:34001 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c2682f91c1e2%2C34001%2C1730994506521.meta:.meta(num 1730994509067) 2024-11-07T15:48:43,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741834_1010 (size=1830) 2024-11-07T15:48:43,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741834_1010 (size=1830) 2024-11-07T15:48:43,338 DEBUG [RS:0;c2682f91c1e2:34001 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/oldWALs 2024-11-07T15:48:43,338 INFO [RS:0;c2682f91c1e2:34001 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c2682f91c1e2%2C34001%2C1730994506521:(num 1730994508728) 2024-11-07T15:48:43,338 DEBUG [RS:0;c2682f91c1e2:34001 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T15:48:43,339 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T15:48:43,339 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T15:48:43,339 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.ChoreService(370): Chore service for: regionserver/c2682f91c1e2:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-07T15:48:43,339 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T15:48:43,339 INFO [regionserver/c2682f91c1e2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
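The "Committing .../.tmp/... as ..." lines above show each flushed file being written under the region's .tmp directory and then published into the store directory with a rename. A stand-alone sketch of that write-then-rename pattern using the Hadoop FileSystem API follows; the paths are placeholders, not the ones from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommit {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/demo/table/region/.tmp/cf/flushed-file");
        Path dst = new Path("/demo/table/region/cf/flushed-file");

        // Write the new file somewhere readers never look...
        try (FSDataOutputStream out = fs.create(tmp)) {
          out.writeUTF("flushed cells would go here");
        }
        // ...then publish it with a single rename so readers see either
        // no file or a complete one, never a partial write.
        fs.mkdirs(dst.getParent());
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }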
2024-11-07T15:48:43,340 INFO [RS:0;c2682f91c1e2:34001 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34001 2024-11-07T15:48:43,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c2682f91c1e2,34001,1730994506521 2024-11-07T15:48:43,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T15:48:43,344 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T15:48:43,345 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c2682f91c1e2,34001,1730994506521] 2024-11-07T15:48:43,347 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c2682f91c1e2,34001,1730994506521 already deleted, retry=false 2024-11-07T15:48:43,347 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c2682f91c1e2,34001,1730994506521 expired; onlineServers=1 2024-11-07T15:48:43,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T15:48:43,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34001-0x101836b79d20001, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-07T15:48:43,448 INFO [RS:0;c2682f91c1e2:34001 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-07T15:48:43,448 INFO [RS:0;c2682f91c1e2:34001 {}] regionserver.HRegionServer(1031): Exiting; stopping=c2682f91c1e2,34001,1730994506521; zookeeper connection closed. 
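The ZooKeeper traffic in this stretch, an ephemeral /hbase/rs/... node disappearing when the region server stops and the master's RegionServerTracker reacting to the change, is the usual ephemeral-node liveness pattern. Below is a minimal stand-alone sketch with the plain ZooKeeper client; the quorum string, paths and timeouts are illustrative, and HBase's internal ZKWatcher is not reproduced here:

    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.Watcher.Event.EventType;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralLiveness {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

        // Make sure the parent path exists (persistent nodes survive sessions).
        if (zk.exists("/demo", false) == null) {
          zk.create("/demo", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
        if (zk.exists("/demo/rs", false) == null) {
          zk.create("/demo/rs", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }

        // A "server" registers itself with an ephemeral node; it vanishes
        // automatically when the session (i.e. the process) goes away.
        zk.create("/demo/rs/server-1", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // A "tracker" watches the parent for membership changes, the way the
        // master notices NodeChildrenChanged on /hbase/rs above.
        List<String> live = zk.getChildren("/demo/rs", event -> {
          if (event.getType() == EventType.NodeChildrenChanged) {
            System.out.println("membership changed: " + event.getPath());
          }
        });
        System.out.println("currently registered: " + live);
      }
    }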
2024-11-07T15:48:43,448 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@68d33949 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@68d33949 2024-11-07T15:48:43,523 DEBUG [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(1351): Waiting on 72d83c7eae51146f5b941b52ec6566e1 2024-11-07T15:48:43,564 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=597 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1/.tmp/q/9e10f9dae8eb4bddb632e5d77990c9ed 2024-11-07T15:48:43,574 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9e10f9dae8eb4bddb632e5d77990c9ed 2024-11-07T15:48:43,595 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1/.tmp/u/0e0c641676544d4ea007e646669e55ba is 43, key is t.TestNs:TestTable/u:/1730994518592/DeleteFamily/seqid=0 2024-11-07T15:48:43,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741853_1029 (size=5250) 2024-11-07T15:48:43,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741853_1029 (size=5250) 2024-11-07T15:48:43,603 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=129 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1/.tmp/u/0e0c641676544d4ea007e646669e55ba 2024-11-07T15:48:43,610 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0e0c641676544d4ea007e646669e55ba 2024-11-07T15:48:43,611 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1/.tmp/q/9e10f9dae8eb4bddb632e5d77990c9ed as hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1/q/9e10f9dae8eb4bddb632e5d77990c9ed 2024-11-07T15:48:43,619 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9e10f9dae8eb4bddb632e5d77990c9ed 2024-11-07T15:48:43,620 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1/q/9e10f9dae8eb4bddb632e5d77990c9ed, entries=5, sequenceid=17, filesize=5.2 K 2024-11-07T15:48:43,621 DEBUG 
[RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1/.tmp/u/0e0c641676544d4ea007e646669e55ba as hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1/u/0e0c641676544d4ea007e646669e55ba 2024-11-07T15:48:43,625 INFO [regionserver/c2682f91c1e2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-07T15:48:43,625 INFO [regionserver/c2682f91c1e2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-07T15:48:43,625 INFO [regionserver/c2682f91c1e2:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: SpaceQuotaRefresherChore was stopped 2024-11-07T15:48:43,631 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0e0c641676544d4ea007e646669e55ba 2024-11-07T15:48:43,631 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1/u/0e0c641676544d4ea007e646669e55ba, entries=4, sequenceid=17, filesize=5.1 K 2024-11-07T15:48:43,632 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~726 B/726, heapSize ~2.41 KB/2464, currentSize=0 B/0 for 72d83c7eae51146f5b941b52ec6566e1 in 509ms, sequenceid=17, compaction requested=false 2024-11-07T15:48:43,640 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/data/hbase/quota/72d83c7eae51146f5b941b52ec6566e1/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=1 2024-11-07T15:48:43,641 INFO [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 2024-11-07T15:48:43,641 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 72d83c7eae51146f5b941b52ec6566e1: Waiting for close lock at 1730994523122Running coprocessor pre-close hooks at 1730994523123 (+1 ms)Disabling compacts and flushes for region at 1730994523123Disabling writes for close at 1730994523123Obtaining lock to block concurrent updates at 1730994523123Preparing flush snapshotting stores in 72d83c7eae51146f5b941b52ec6566e1 at 1730994523123Finished memstore snapshotting hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1., syncing WAL and waiting on mvcc, flushsize=dataSize=726, getHeapSize=2464, getOffHeapSize=0, getCellsCount=17 at 1730994523124 (+1 ms)Flushing stores of hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 
at 1730994523125 (+1 ms)Flushing 72d83c7eae51146f5b941b52ec6566e1/q: creating writer at 1730994523125Flushing 72d83c7eae51146f5b941b52ec6566e1/q: appending metadata at 1730994523142 (+17 ms)Flushing 72d83c7eae51146f5b941b52ec6566e1/q: closing flushed file at 1730994523142Flushing 72d83c7eae51146f5b941b52ec6566e1/u: creating writer at 1730994523575 (+433 ms)Flushing 72d83c7eae51146f5b941b52ec6566e1/u: appending metadata at 1730994523594 (+19 ms)Flushing 72d83c7eae51146f5b941b52ec6566e1/u: closing flushed file at 1730994523594Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14080528: reopening flushed file at 1730994523610 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40e96f87: reopening flushed file at 1730994523620 (+10 ms)Finished flush of dataSize ~726 B/726, heapSize ~2.41 KB/2464, currentSize=0 B/0 for 72d83c7eae51146f5b941b52ec6566e1 in 509ms, sequenceid=17, compaction requested=false at 1730994523632 (+12 ms)Writing region close event to WAL at 1730994523636 (+4 ms)Running coprocessor post-close hooks at 1730994523641 (+5 ms)Closed at 1730994523641 2024-11-07T15:48:43,641 DEBUG [RS_CLOSE_REGION-regionserver/c2682f91c1e2:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:quota,,1730994509346.72d83c7eae51146f5b941b52ec6566e1. 2024-11-07T15:48:43,724 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(976): stopping server c2682f91c1e2,37185,1730994506636; all regions closed. 2024-11-07T15:48:43,724 DEBUG [RS:1;c2682f91c1e2:37185 {}] quotas.QuotaCache(112): Stopping QuotaRefresherChore chore. 2024-11-07T15:48:43,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741833_1009 (size=6152) 2024-11-07T15:48:43,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741833_1009 (size=6152) 2024-11-07T15:48:43,732 DEBUG [RS:1;c2682f91c1e2:37185 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/oldWALs 2024-11-07T15:48:43,733 INFO [RS:1;c2682f91c1e2:37185 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL c2682f91c1e2%2C37185%2C1730994506636:(num 1730994508727) 2024-11-07T15:48:43,733 DEBUG [RS:1;c2682f91c1e2:37185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-07T15:48:43,733 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.LeaseManager(133): Closed leases 2024-11-07T15:48:43,733 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T15:48:43,733 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.ChoreService(370): Chore service for: regionserver/c2682f91c1e2:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-07T15:48:43,733 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-07T15:48:43,733 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-07T15:48:43,733 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
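The q and u cells flushed out of hbase:quota earlier (rows such as u.jenkins and t.TestNs:TestTable) are the persisted throttle settings this quota test exercises. Writing and clearing such throttles from a client looks roughly like the sketch below; the limits and names are illustrative:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
    import org.apache.hadoop.hbase.quotas.ThrottleType;

    public class ThrottleQuotaExample {
      static void setAndClearThrottles(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestNs", "TestTable");
        try (Admin admin = conn.getAdmin()) {
          // Throttle the user to 6 requests per minute (stored under row u.jenkins).
          admin.setQuota(QuotaSettingsFactory.throttleUser(
              "jenkins", ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES));
          // Throttle the table to 10 MB written per minute (row t.TestNs:TestTable).
          admin.setQuota(QuotaSettingsFactory.throttleTable(
              tn, ThrottleType.WRITE_SIZE, 10L * 1024 * 1024, TimeUnit.MINUTES));

          // Clearing the throttles is what leaves the DeleteFamily markers
          // visible in the hbase:quota flush above.
          admin.setQuota(QuotaSettingsFactory.unthrottleUser("jenkins"));
          admin.setQuota(QuotaSettingsFactory.unthrottleTable(tn));
        }
      }
    }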
2024-11-07T15:48:43,734 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-07T15:48:43,734 INFO [RS:1;c2682f91c1e2:37185 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37185 2024-11-07T15:48:43,734 INFO [regionserver/c2682f91c1e2:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-07T15:48:43,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-07T15:48:43,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c2682f91c1e2,37185,1730994506636 2024-11-07T15:48:43,736 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-07T15:48:43,737 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c2682f91c1e2,37185,1730994506636] 2024-11-07T15:48:43,739 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c2682f91c1e2,37185,1730994506636 already deleted, retry=false 2024-11-07T15:48:43,739 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c2682f91c1e2,37185,1730994506636 expired; onlineServers=0 2024-11-07T15:48:43,739 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c2682f91c1e2,38893,1730994505717' ***** 2024-11-07T15:48:43,739 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-07T15:48:43,740 INFO [M:0;c2682f91c1e2:38893 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-07T15:48:43,740 INFO [M:0;c2682f91c1e2:38893 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-07T15:48:43,740 DEBUG [M:0;c2682f91c1e2:38893 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-07T15:48:43,740 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-07T15:48:43,740 DEBUG [M:0;c2682f91c1e2:38893 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-07T15:48:43,740 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster-HFileCleaner.large.0-1730994508296 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2682f91c1e2:0:becomeActiveMaster-HFileCleaner.large.0-1730994508296,5,FailOnTimeoutGroup]
2024-11-07T15:48:43,740 DEBUG [master/c2682f91c1e2:0:becomeActiveMaster-HFileCleaner.small.0-1730994508297 {}] cleaner.HFileCleaner(306): Exit Thread[master/c2682f91c1e2:0:becomeActiveMaster-HFileCleaner.small.0-1730994508297,5,FailOnTimeoutGroup]
2024-11-07T15:48:43,741 INFO [M:0;c2682f91c1e2:38893 {}] hbase.ChoreService(370): Chore service for: master/c2682f91c1e2:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS, ScheduledChore name=QuotaObserverChore, period=60000, unit=MILLISECONDS] on shutdown
2024-11-07T15:48:43,741 INFO [M:0;c2682f91c1e2:38893 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-07T15:48:43,741 DEBUG [M:0;c2682f91c1e2:38893 {}] master.HMaster(1795): Stopping service threads
2024-11-07T15:48:43,741 INFO [M:0;c2682f91c1e2:38893 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-07T15:48:43,741 INFO [M:0;c2682f91c1e2:38893 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-07T15:48:43,741 ERROR [M:0;c2682f91c1e2:38893 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-1,5,PEWorkerGroup] Thread[HFileArchiver-2,5,PEWorkerGroup] Thread[HFileArchiver-3,5,PEWorkerGroup] Thread[HFileArchiver-4,5,PEWorkerGroup] Thread[HFileArchiver-5,5,PEWorkerGroup]
2024-11-07T15:48:43,742 INFO [M:0;c2682f91c1e2:38893 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-07T15:48:43,742 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-07T15:48:43,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-07T15:48:43,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-07T15:48:43,744 DEBUG [M:0;c2682f91c1e2:38893 {}] zookeeper.ZKUtil(347): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-07T15:48:43,744 WARN [M:0;c2682f91c1e2:38893 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-07T15:48:43,746 INFO [M:0;c2682f91c1e2:38893 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/.lastflushedseqids
2024-11-07T15:48:43,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741854_1030 (size=134)
2024-11-07T15:48:43,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741854_1030 (size=134)
2024-11-07T15:48:43,776 INFO [M:0;c2682f91c1e2:38893 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-07T15:48:43,776 INFO [M:0;c2682f91c1e2:38893 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-07T15:48:43,776 DEBUG [M:0;c2682f91c1e2:38893 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-07T15:48:43,776 INFO [M:0;c2682f91c1e2:38893 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T15:48:43,776 DEBUG [M:0;c2682f91c1e2:38893 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T15:48:43,776 DEBUG [M:0;c2682f91c1e2:38893 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-07T15:48:43,776 DEBUG [M:0;c2682f91c1e2:38893 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T15:48:43,777 INFO [M:0;c2682f91c1e2:38893 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=157.72 KB heapSize=190.77 KB
2024-11-07T15:48:43,795 DEBUG [M:0;c2682f91c1e2:38893 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/13d9eca88e1743439498dc9eeaf67de2 is 82, key is hbase:meta,,1/info:regioninfo/1730994509174/Put/seqid=0
2024-11-07T15:48:43,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741855_1031 (size=5672)
2024-11-07T15:48:43,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741855_1031 (size=5672)
2024-11-07T15:48:43,807 INFO [M:0;c2682f91c1e2:38893 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/13d9eca88e1743439498dc9eeaf67de2
2024-11-07T15:48:43,833 DEBUG [M:0;c2682f91c1e2:38893 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/71630d7ebbc84664bcd8ee2a9a7c0a91 is 958, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1730994510255/Put/seqid=0
2024-11-07T15:48:43,839 INFO [RS:1;c2682f91c1e2:37185 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-07T15:48:43,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T15:48:43,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37185-0x101836b79d20002, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T15:48:43,839 INFO [RS:1;c2682f91c1e2:37185 {}] regionserver.HRegionServer(1031): Exiting; stopping=c2682f91c1e2,37185,1730994506636; zookeeper connection closed.
2024-11-07T15:48:43,840 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3394c135 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3394c135
2024-11-07T15:48:43,841 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete
2024-11-07T15:48:43,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741856_1032 (size=13432)
2024-11-07T15:48:43,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741856_1032 (size=13432)
2024-11-07T15:48:43,843 INFO [M:0;c2682f91c1e2:38893 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=157.10 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/71630d7ebbc84664bcd8ee2a9a7c0a91
2024-11-07T15:48:43,874 DEBUG [M:0;c2682f91c1e2:38893 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/13229e634c134277bb2678aad4d51224 is 69, key is c2682f91c1e2,34001,1730994506521/rs:state/1730994508441/Put/seqid=0
2024-11-07T15:48:43,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741857_1033 (size=5224)
2024-11-07T15:48:43,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741857_1033 (size=5224)
2024-11-07T15:48:43,882 INFO [M:0;c2682f91c1e2:38893 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/13229e634c134277bb2678aad4d51224
2024-11-07T15:48:43,891 DEBUG [M:0;c2682f91c1e2:38893 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/13d9eca88e1743439498dc9eeaf67de2 as hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/13d9eca88e1743439498dc9eeaf67de2
2024-11-07T15:48:43,899 INFO [M:0;c2682f91c1e2:38893 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/13d9eca88e1743439498dc9eeaf67de2, entries=8, sequenceid=375, filesize=5.5 K
2024-11-07T15:48:43,900 DEBUG [M:0;c2682f91c1e2:38893 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/71630d7ebbc84664bcd8ee2a9a7c0a91 as hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/71630d7ebbc84664bcd8ee2a9a7c0a91
2024-11-07T15:48:43,907 INFO [M:0;c2682f91c1e2:38893 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/71630d7ebbc84664bcd8ee2a9a7c0a91, entries=44, sequenceid=375, filesize=13.1 K
2024-11-07T15:48:43,909 DEBUG [M:0;c2682f91c1e2:38893 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/13229e634c134277bb2678aad4d51224 as hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/13229e634c134277bb2678aad4d51224
2024-11-07T15:48:43,916 INFO [M:0;c2682f91c1e2:38893 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44951/user/jenkins/test-data/ba547aff-4805-894f-64bc-dac1a3ea1a61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/13229e634c134277bb2678aad4d51224, entries=2, sequenceid=375, filesize=5.1 K
2024-11-07T15:48:43,917 INFO [M:0;c2682f91c1e2:38893 {}] regionserver.HRegion(3140): Finished flush of dataSize ~157.72 KB/161503, heapSize ~190.47 KB/195040, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=375, compaction requested=false
2024-11-07T15:48:43,919 INFO [M:0;c2682f91c1e2:38893 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-07T15:48:43,919 DEBUG [M:0;c2682f91c1e2:38893 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1730994523776Disabling compacts and flushes for region at 1730994523776Disabling writes for close at 1730994523776Obtaining lock to block concurrent updates at 1730994523777 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1730994523777Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=161503, getHeapSize=195280, getOffHeapSize=0, getCellsCount=434 at 1730994523777Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1730994523778 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1730994523778Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1730994523795 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1730994523795Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1730994523814 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1730994523832 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1730994523832Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1730994523851 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1730994523873 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1730994523873Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59fdf80a: reopening flushed file at 1730994523890 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c98b384: reopening flushed file at 1730994523899 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@537e933: reopening flushed file at 1730994523908 (+9 ms)Finished flush of dataSize ~157.72 KB/161503, heapSize ~190.47 KB/195040, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=375, compaction requested=false at 1730994523918 (+10 ms)Writing region close event to WAL at 1730994523919 (+1 ms)Closed at 1730994523919
2024-11-07T15:48:43,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40735 is added to blk_1073741830_1006 (size=186533)
2024-11-07T15:48:43,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35461 is added to blk_1073741830_1006 (size=186533)
2024-11-07T15:48:43,923 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-07T15:48:43,924 INFO [M:0;c2682f91c1e2:38893 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-07T15:48:43,924 INFO [M:0;c2682f91c1e2:38893 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38893
2024-11-07T15:48:43,924 INFO [M:0;c2682f91c1e2:38893 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-07T15:48:44,026 INFO [M:0;c2682f91c1e2:38893 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-07T15:48:44,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T15:48:44,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38893-0x101836b79d20000, quorum=127.0.0.1:50813, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-07T15:48:44,042 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@36a28386{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T15:48:44,045 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ccb07ae{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T15:48:44,046 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T15:48:44,046 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ff9f4a9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T15:48:44,046 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7416e56c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/hadoop.log.dir/,STOPPED}
2024-11-07T15:48:44,051 WARN [BP-2051735518-172.17.0.2-1730994502303 heartbeating to localhost/127.0.0.1:44951 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T15:48:44,051 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T15:48:44,051 WARN [BP-2051735518-172.17.0.2-1730994502303 heartbeating to localhost/127.0.0.1:44951 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2051735518-172.17.0.2-1730994502303 (Datanode Uuid 09a05c23-7213-4c48-afb1-b798647ec89a) service to localhost/127.0.0.1:44951
2024-11-07T15:48:44,051 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T15:48:44,055 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/cluster_1448e0b2-db09-f2c0-aa15-452c85982d01/data/data3/current/BP-2051735518-172.17.0.2-1730994502303 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T15:48:44,055 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/cluster_1448e0b2-db09-f2c0-aa15-452c85982d01/data/data4/current/BP-2051735518-172.17.0.2-1730994502303 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T15:48:44,056 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T15:48:44,080 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4756d35c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-07T15:48:44,081 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@42690001{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T15:48:44,081 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T15:48:44,081 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@167d5b17{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T15:48:44,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a3983b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/hadoop.log.dir/,STOPPED}
2024-11-07T15:48:44,083 WARN [BP-2051735518-172.17.0.2-1730994502303 heartbeating to localhost/127.0.0.1:44951 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-07T15:48:44,083 WARN [BP-2051735518-172.17.0.2-1730994502303 heartbeating to localhost/127.0.0.1:44951 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2051735518-172.17.0.2-1730994502303 (Datanode Uuid 2efb23b7-1301-4064-9c0a-e9338a092385) service to localhost/127.0.0.1:44951
2024-11-07T15:48:44,084 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/cluster_1448e0b2-db09-f2c0-aa15-452c85982d01/data/data1/current/BP-2051735518-172.17.0.2-1730994502303 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T15:48:44,084 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/cluster_1448e0b2-db09-f2c0-aa15-452c85982d01/data/data2/current/BP-2051735518-172.17.0.2-1730994502303 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-07T15:48:44,084 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-07T15:48:44,084 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-07T15:48:44,085 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-07T15:48:44,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@300f56e1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-07T15:48:44,095 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51edec43{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-07T15:48:44,095 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-07T15:48:44,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b1a9f49{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-07T15:48:44,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d649244{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f5959759-6c57-622f-21bf-cf0b32045edd/hadoop.log.dir/,STOPPED}
2024-11-07T15:48:44,104 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-07T15:48:44,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down