2024-12-01 23:53:48,309 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-12-01 23:53:48,325 main DEBUG Took 0.013200 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-01 23:53:48,326 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-01 23:53:48,326 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-01 23:53:48,328 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-01 23:53:48,330 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:53:48,363 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-01 23:53:48,382 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,383 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:53:48,387 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,387 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:53:48,388 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,388 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:53:48,389 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,389 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:53:48,390 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,390 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:53:48,391 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,391 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:53:48,392 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,393 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-01 23:53:48,393 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,394 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:53:48,394 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,395 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:53:48,395 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,396 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:53:48,396 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,397 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:53:48,398 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,398 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 23:53:48,399 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,400 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-01 23:53:48,402 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 23:53:48,403 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-01 23:53:48,406 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-01 23:53:48,406 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-01 23:53:48,408 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-01 23:53:48,409 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-01 23:53:48,419 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-01 23:53:48,426 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-01 23:53:48,428 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-01 23:53:48,428 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-01 23:53:48,429 main DEBUG createAppenders(={Console}) 2024-12-01 23:53:48,430 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a initialized 2024-12-01 23:53:48,431 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a 2024-12-01 23:53:48,431 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@6f204a1a OK. 2024-12-01 23:53:48,432 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-01 23:53:48,432 main DEBUG OutputStream closed 2024-12-01 23:53:48,432 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-01 23:53:48,433 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-01 23:53:48,433 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@21e360a OK 2024-12-01 23:53:48,564 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-01 23:53:48,567 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-01 23:53:48,568 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-01 23:53:48,570 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-01 23:53:48,571 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-01 23:53:48,571 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-01 23:53:48,572 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-01 23:53:48,572 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-01 23:53:48,572 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-01 23:53:48,573 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-01 23:53:48,573 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-01 23:53:48,573 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-01 23:53:48,574 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-01 23:53:48,574 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-01 23:53:48,575 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-01 23:53:48,575 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-01 23:53:48,575 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-01 23:53:48,576 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-01 23:53:48,581 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-01 23:53:48,582 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@42b02722) with optional ClassLoader: null 2024-12-01 23:53:48,583 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-01 23:53:48,585 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@42b02722] started OK. 2024-12-01T23:53:48,606 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle timeout: 13 mins 2024-12-01 23:53:48,608 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-01 23:53:48,609 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-01T23:53:49,039 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3 2024-12-01T23:53:49,077 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=2, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-01T23:53:49,114 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/cluster_a168f772-ce25-779e-39a7-1ee39f6fa68c, deleteOnExit=true 2024-12-01T23:53:49,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-01T23:53:49,115 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/test.cache.data in system properties and HBase conf 2024-12-01T23:53:49,116 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T23:53:49,117 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/hadoop.log.dir in system properties and HBase conf 2024-12-01T23:53:49,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T23:53:49,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T23:53:49,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-01T23:53:49,306 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-01T23:53:49,452 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-01T23:53:49,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T23:53:49,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T23:53:49,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T23:53:49,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T23:53:49,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T23:53:49,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T23:53:49,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T23:53:49,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T23:53:49,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T23:53:49,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/nfs.dump.dir in system properties and HBase conf 2024-12-01T23:53:49,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/java.io.tmpdir in system properties and HBase conf 2024-12-01T23:53:49,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T23:53:49,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T23:53:49,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T23:53:50,722 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-01T23:53:50,813 INFO [Time-limited test {}] log.Log(170): Logging initialized @3470ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-01T23:53:50,896 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:53:50,976 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T23:53:50,999 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T23:53:51,000 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T23:53:51,002 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T23:53:51,018 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:53:51,021 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e977c95{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/hadoop.log.dir/,AVAILABLE} 2024-12-01T23:53:51,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ce21d39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T23:53:51,230 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@358110da{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/java.io.tmpdir/jetty-localhost-40783-hadoop-hdfs-3_4_1-tests_jar-_-any-1571876112731363131/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T23:53:51,238 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@105bda08{HTTP/1.1, (http/1.1)}{localhost:40783} 2024-12-01T23:53:51,238 INFO [Time-limited test {}] server.Server(415): Started @3896ms 2024-12-01T23:53:51,905 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:53:51,914 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T23:53:51,915 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T23:53:51,916 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T23:53:51,916 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T23:53:51,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aba4a96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/hadoop.log.dir/,AVAILABLE} 2024-12-01T23:53:51,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dafd2a7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T23:53:52,046 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@71bdb202{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/java.io.tmpdir/jetty-localhost-35719-hadoop-hdfs-3_4_1-tests_jar-_-any-9882371889728750447/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:53:52,047 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@69099528{HTTP/1.1, (http/1.1)}{localhost:35719} 2024-12-01T23:53:52,047 INFO [Time-limited test {}] server.Server(415): Started @4705ms 2024-12-01T23:53:52,113 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T23:53:52,295 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T23:53:52,302 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T23:53:52,313 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T23:53:52,313 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T23:53:52,314 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T23:53:52,316 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77d683b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/hadoop.log.dir/,AVAILABLE} 2024-12-01T23:53:52,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30b97b9d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T23:53:52,466 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58be3a95{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/java.io.tmpdir/jetty-localhost-42795-hadoop-hdfs-3_4_1-tests_jar-_-any-15823647933826749091/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T23:53:52,467 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@296c17de{HTTP/1.1, (http/1.1)}{localhost:42795} 2024-12-01T23:53:52,467 INFO [Time-limited test {}] server.Server(415): Started @5125ms 2024-12-01T23:53:52,471 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-01T23:53:53,505 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/cluster_a168f772-ce25-779e-39a7-1ee39f6fa68c/data/data3/current/BP-1358816574-172.17.0.2-1733097230195/current, will proceed with Du for space computation calculation, 2024-12-01T23:53:53,505 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/cluster_a168f772-ce25-779e-39a7-1ee39f6fa68c/data/data4/current/BP-1358816574-172.17.0.2-1733097230195/current, will proceed with Du for space computation calculation, 2024-12-01T23:53:53,505 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/cluster_a168f772-ce25-779e-39a7-1ee39f6fa68c/data/data1/current/BP-1358816574-172.17.0.2-1733097230195/current, will proceed with Du for space computation calculation, 2024-12-01T23:53:53,507 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/cluster_a168f772-ce25-779e-39a7-1ee39f6fa68c/data/data2/current/BP-1358816574-172.17.0.2-1733097230195/current, will proceed with Du for space computation calculation, 2024-12-01T23:53:53,577 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T23:53:53,578 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T23:53:53,633 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa254c9a13eb6f31 with lease ID 0xbac3f0f0d731ddf: Processing first storage report for DS-e911d9dd-51de-49e2-a93a-82136a215d17 from datanode DatanodeRegistration(127.0.0.1:39935, datanodeUuid=f55fbbc8-ba20-4f8f-aded-90ea86fc4d5f, infoPort=42783, infoSecurePort=0, ipcPort=37053, storageInfo=lv=-57;cid=testClusterID;nsid=984655595;c=1733097230195) 2024-12-01T23:53:53,635 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa254c9a13eb6f31 with lease ID 0xbac3f0f0d731ddf: from storage DS-e911d9dd-51de-49e2-a93a-82136a215d17 node DatanodeRegistration(127.0.0.1:39935, datanodeUuid=f55fbbc8-ba20-4f8f-aded-90ea86fc4d5f, infoPort=42783, infoSecurePort=0, ipcPort=37053, storageInfo=lv=-57;cid=testClusterID;nsid=984655595;c=1733097230195), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-01T23:53:53,635 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc31108d8c0adb08c with lease ID 0xbac3f0f0d731dde: Processing first storage report for DS-9d7ebde0-20b7-41fb-8d71-4bf5406ae646 from datanode DatanodeRegistration(127.0.0.1:46129, datanodeUuid=c97d0686-e6f4-4994-9ab8-3ca0a242015c, infoPort=37593, infoSecurePort=0, ipcPort=34761, storageInfo=lv=-57;cid=testClusterID;nsid=984655595;c=1733097230195) 2024-12-01T23:53:53,635 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc31108d8c0adb08c with lease ID 0xbac3f0f0d731dde: from storage DS-9d7ebde0-20b7-41fb-8d71-4bf5406ae646 node DatanodeRegistration(127.0.0.1:46129, datanodeUuid=c97d0686-e6f4-4994-9ab8-3ca0a242015c, infoPort=37593, infoSecurePort=0, ipcPort=34761, storageInfo=lv=-57;cid=testClusterID;nsid=984655595;c=1733097230195), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T23:53:53,636 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa254c9a13eb6f31 with lease ID 0xbac3f0f0d731ddf: Processing first storage report for DS-5d71fc34-0e16-49b6-a262-473a394ff786 from datanode DatanodeRegistration(127.0.0.1:39935, datanodeUuid=f55fbbc8-ba20-4f8f-aded-90ea86fc4d5f, infoPort=42783, infoSecurePort=0, ipcPort=37053, storageInfo=lv=-57;cid=testClusterID;nsid=984655595;c=1733097230195) 2024-12-01T23:53:53,636 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa254c9a13eb6f31 with lease ID 0xbac3f0f0d731ddf: from storage DS-5d71fc34-0e16-49b6-a262-473a394ff786 node DatanodeRegistration(127.0.0.1:39935, datanodeUuid=f55fbbc8-ba20-4f8f-aded-90ea86fc4d5f, infoPort=42783, infoSecurePort=0, ipcPort=37053, storageInfo=lv=-57;cid=testClusterID;nsid=984655595;c=1733097230195), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T23:53:53,636 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc31108d8c0adb08c with lease ID 0xbac3f0f0d731dde: Processing first storage report for DS-9358ece0-a803-40d4-9edb-25f40edb2c25 from datanode DatanodeRegistration(127.0.0.1:46129, datanodeUuid=c97d0686-e6f4-4994-9ab8-3ca0a242015c, infoPort=37593, infoSecurePort=0, ipcPort=34761, storageInfo=lv=-57;cid=testClusterID;nsid=984655595;c=1733097230195) 2024-12-01T23:53:53,636 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc31108d8c0adb08c with 
lease ID 0xbac3f0f0d731dde: from storage DS-9358ece0-a803-40d4-9edb-25f40edb2c25 node DatanodeRegistration(127.0.0.1:46129, datanodeUuid=c97d0686-e6f4-4994-9ab8-3ca0a242015c, infoPort=37593, infoSecurePort=0, ipcPort=34761, storageInfo=lv=-57;cid=testClusterID;nsid=984655595;c=1733097230195), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T23:53:53,667 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3 2024-12-01T23:53:53,744 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/cluster_a168f772-ce25-779e-39a7-1ee39f6fa68c/zookeeper_0, clientPort=61873, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/cluster_a168f772-ce25-779e-39a7-1ee39f6fa68c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/cluster_a168f772-ce25-779e-39a7-1ee39f6fa68c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-01T23:53:53,754 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61873 2024-12-01T23:53:53,767 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:53:53,771 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:53:53,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741825_1001 (size=7) 2024-12-01T23:53:53,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741825_1001 (size=7) 2024-12-01T23:53:54,379 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18 with version=8 2024-12-01T23:53:54,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/hbase-staging 2024-12-01T23:53:54,453 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-01T23:53:54,672 INFO [Time-limited test {}] client.ConnectionUtils(128): master/579b0f681375:0 server-side Connection retries=45 2024-12-01T23:53:54,680 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:53:54,680 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, 
maxQueueLength=30, handlerCount=3 2024-12-01T23:53:54,684 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T23:53:54,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:53:54,685 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T23:53:54,801 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-01T23:53:54,853 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-01T23:53:54,861 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-01T23:53:54,864 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T23:53:54,886 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 105979 (auto-detected) 2024-12-01T23:53:54,887 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-01T23:53:54,904 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40611 2024-12-01T23:53:54,922 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40611 connecting to ZooKeeper ensemble=127.0.0.1:61873 2024-12-01T23:53:55,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:406110x0, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T23:53:55,007 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40611-0x101949ab0000000 connected 2024-12-01T23:53:55,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:53:55,083 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:53:55,092 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:53:55,096 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18, hbase.cluster.distributed=false 2024-12-01T23:53:55,117 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T23:53:55,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40611 
2024-12-01T23:53:55,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40611 2024-12-01T23:53:55,123 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40611 2024-12-01T23:53:55,126 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40611 2024-12-01T23:53:55,127 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40611 2024-12-01T23:53:55,225 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/579b0f681375:0 server-side Connection retries=45 2024-12-01T23:53:55,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:53:55,227 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T23:53:55,227 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T23:53:55,227 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:53:55,227 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T23:53:55,231 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T23:53:55,233 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T23:53:55,234 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45401 2024-12-01T23:53:55,236 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45401 connecting to ZooKeeper ensemble=127.0.0.1:61873 2024-12-01T23:53:55,237 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:53:55,241 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:53:55,312 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454010x0, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T23:53:55,313 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45401-0x101949ab0000001 connected 2024-12-01T23:53:55,314 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-12-01T23:53:55,321 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T23:53:55,329 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T23:53:55,331 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T23:53:55,336 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T23:53:55,337 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45401 2024-12-01T23:53:55,337 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45401 2024-12-01T23:53:55,338 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45401 2024-12-01T23:53:55,339 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45401 2024-12-01T23:53:55,339 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45401 2024-12-01T23:53:55,353 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/579b0f681375:0 server-side Connection retries=45 2024-12-01T23:53:55,353 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:53:55,353 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T23:53:55,354 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T23:53:55,354 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T23:53:55,354 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T23:53:55,354 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T23:53:55,354 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T23:53:55,355 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44651 2024-12-01T23:53:55,357 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44651 connecting to ZooKeeper ensemble=127.0.0.1:61873 2024-12-01T23:53:55,358 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:53:55,360 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:53:55,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446510x0, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T23:53:55,386 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44651-0x101949ab0000002 connected 2024-12-01T23:53:55,386 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:53:55,387 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T23:53:55,388 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T23:53:55,389 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T23:53:55,392 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T23:53:55,393 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44651 2024-12-01T23:53:55,393 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44651 2024-12-01T23:53:55,394 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44651 2024-12-01T23:53:55,394 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44651 2024-12-01T23:53:55,395 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44651 2024-12-01T23:53:55,412 DEBUG [M:0;579b0f681375:40611 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;579b0f681375:40611 2024-12-01T23:53:55,413 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/579b0f681375,40611,1733097234530 2024-12-01T23:53:55,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:53:55,429 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:53:55,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, 
quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:53:55,431 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/579b0f681375,40611,1733097234530 2024-12-01T23:53:55,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T23:53:55,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:55,462 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T23:53:55,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:55,462 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:55,463 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T23:53:55,463 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/579b0f681375,40611,1733097234530 from backup master directory 2024-12-01T23:53:55,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:53:55,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/579b0f681375,40611,1733097234530 2024-12-01T23:53:55,470 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:53:55,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T23:53:55,471 WARN [master/579b0f681375:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T23:53:55,471 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=579b0f681375,40611,1733097234530 2024-12-01T23:53:55,473 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-01T23:53:55,475 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-01T23:53:55,535 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/hbase.id] with ID: 9b366596-4c21-469d-a085-5ab2e4711c75 2024-12-01T23:53:55,535 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/.tmp/hbase.id 2024-12-01T23:53:55,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741826_1002 (size=42) 2024-12-01T23:53:55,549 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/.tmp/hbase.id]:[hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/hbase.id] 2024-12-01T23:53:55,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741826_1002 (size=42) 2024-12-01T23:53:55,592 INFO [master/579b0f681375:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T23:53:55,596 INFO [master/579b0f681375:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-01T23:53:55,613 INFO [master/579b0f681375:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 
2024-12-01T23:53:55,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:55,645 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:55,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:55,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741827_1003 (size=196) 2024-12-01T23:53:55,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741827_1003 (size=196) 2024-12-01T23:53:55,680 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T23:53:55,682 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T23:53:55,695 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T23:53:55,699 INFO [master/579b0f681375:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T23:53:55,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741828_1004 (size=1189) 2024-12-01T23:53:55,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741828_1004 (size=1189) 2024-12-01T23:53:55,744 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store 2024-12-01T23:53:55,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741829_1005 (size=34) 2024-12-01T23:53:55,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741829_1005 (size=34) 2024-12-01T23:53:55,768 INFO [master/579b0f681375:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-01T23:53:55,771 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:53:55,772 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T23:53:55,773 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
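[Editor's note: the StoreHotnessProtector record above carries its own remedy: the protector stays disabled until hbase.region.store.parallel.put.limit is set above zero. The following is a minimal sketch, not part of this test run, of setting that property programmatically; the value 10 is purely illustrative.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: gives the parallel-put limit a positive value so the
// StoreHotnessProtector is enabled, per the hint in the log record above.
// The chosen limit (10) is an arbitrary example, not a value used by this cluster.
public class EnableStoreHotnessProtector {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
        System.out.println(conf.getInt("hbase.region.store.parallel.put.limit", 0));
    }
}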
2024-12-01T23:53:55,773 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:53:55,775 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T23:53:55,775 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:53:55,775 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:53:55,776 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733097235772Disabling compacts and flushes for region at 1733097235772Disabling writes for close at 1733097235775 (+3 ms)Writing region close event to WAL at 1733097235775Closed at 1733097235775 2024-12-01T23:53:55,778 WARN [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/.initializing 2024-12-01T23:53:55,778 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/WALs/579b0f681375,40611,1733097234530 2024-12-01T23:53:55,787 INFO [master/579b0f681375:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T23:53:55,803 INFO [master/579b0f681375:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C40611%2C1733097234530, suffix=, logDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/WALs/579b0f681375,40611,1733097234530, archiveDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/oldWALs, maxLogs=10 2024-12-01T23:53:55,825 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/WALs/579b0f681375,40611,1733097234530/579b0f681375%2C40611%2C1733097234530.1733097235808, exclude list is [], retry=0 2024-12-01T23:53:55,843 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39935,DS-e911d9dd-51de-49e2-a93a-82136a215d17,DISK] 2024-12-01T23:53:55,843 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46129,DS-9d7ebde0-20b7-41fb-8d71-4bf5406ae646,DISK] 2024-12-01T23:53:55,846 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-01T23:53:55,884 INFO [master/579b0f681375:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/WALs/579b0f681375,40611,1733097234530/579b0f681375%2C40611%2C1733097234530.1733097235808 2024-12-01T23:53:55,885 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37593:37593),(127.0.0.1/127.0.0.1:42783:42783)] 2024-12-01T23:53:55,886 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T23:53:55,887 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:53:55,891 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:53:55,892 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:53:55,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:53:55,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T23:53:55,957 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:55,960 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:53:55,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:53:55,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T23:53:55,965 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:55,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:53:55,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:53:55,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T23:53:55,969 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:55,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:53:55,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:53:55,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T23:53:55,973 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:55,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:53:55,975 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:53:55,979 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:53:55,981 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:53:55,987 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:53:55,988 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:53:55,991 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
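[Editor's note: the FlushLargeStoresPolicy fallback above is simple arithmetic: with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, the lower bound becomes the region memstore flush size divided by the number of column families, i.e. 134217728 / 4 = 33554432 bytes, the "32.0 M" reported here and the flushSizeLowerBound shown a few records below. A small sketch of that computation, assuming the four families of master:store, follows; it is not HBase source.]

// Sketch of the fallback described in the log record above: region memstore
// flush size divided by the number of column families in master:store
// (info, proc, rs, state).
public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        long memStoreFlushSize = 134_217_728L; // flushSize=134217728 reported earlier in this log
        int columnFamilies = 4;                // info, proc, rs, state
        long lowerBound = memStoreFlushSize / columnFamilies;
        System.out.println(lowerBound);        // 33554432 bytes = 32.0 M
    }
}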
2024-12-01T23:53:55,995 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T23:53:56,000 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:53:56,002 INFO [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72420036, jitterRate=0.07914263010025024}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T23:53:56,008 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733097235907Initializing all the Stores at 1733097235910 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733097235910Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733097235911 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733097235911Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733097235911Cleaning up temporary data from old regions at 1733097235988 (+77 ms)Region opened successfully at 1733097236008 (+20 ms) 2024-12-01T23:53:56,009 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T23:53:56,038 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a265c26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=579b0f681375/172.17.0.2:0 2024-12-01T23:53:56,063 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-01T23:53:56,072 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T23:53:56,072 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T23:53:56,075 INFO [master/579b0f681375:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-01T23:53:56,076 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-01T23:53:56,080 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-01T23:53:56,080 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T23:53:56,104 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-01T23:53:56,112 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T23:53:56,170 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-01T23:53:56,176 INFO [master/579b0f681375:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T23:53:56,180 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T23:53:56,186 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-01T23:53:56,188 INFO [master/579b0f681375:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T23:53:56,192 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T23:53:56,203 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-01T23:53:56,205 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T23:53:56,211 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T23:53:56,233 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T23:53:56,242 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T23:53:56,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T23:53:56,253 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T23:53:56,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T23:53:56,254 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:56,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:56,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:56,259 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=579b0f681375,40611,1733097234530, sessionid=0x101949ab0000000, setting cluster-up flag (Was=false) 2024-12-01T23:53:56,287 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:56,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:56,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:56,312 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T23:53:56,314 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=579b0f681375,40611,1733097234530 2024-12-01T23:53:56,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:56,337 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:56,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:56,362 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T23:53:56,365 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=579b0f681375,40611,1733097234530 2024-12-01T23:53:56,373 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-01T23:53:56,399 INFO [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(746): ClusterId : 9b366596-4c21-469d-a085-5ab2e4711c75 2024-12-01T23:53:56,399 INFO [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(746): ClusterId : 9b366596-4c21-469d-a085-5ab2e4711c75 2024-12-01T23:53:56,402 DEBUG [RS:1;579b0f681375:44651 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T23:53:56,402 INFO [master/579b0f681375:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.quotas.MasterQuotasObserver loaded, priority=536870911. 
2024-12-01T23:53:56,402 DEBUG [RS:0;579b0f681375:45401 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T23:53:56,422 DEBUG [RS:0;579b0f681375:45401 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T23:53:56,422 DEBUG [RS:1;579b0f681375:44651 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T23:53:56,422 DEBUG [RS:0;579b0f681375:45401 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T23:53:56,422 DEBUG [RS:1;579b0f681375:44651 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T23:53:56,429 DEBUG [RS:0;579b0f681375:45401 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T23:53:56,429 DEBUG [RS:1;579b0f681375:44651 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T23:53:56,430 DEBUG [RS:0;579b0f681375:45401 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@122603c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=579b0f681375/172.17.0.2:0 2024-12-01T23:53:56,430 DEBUG [RS:1;579b0f681375:44651 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75a524d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=579b0f681375/172.17.0.2:0 2024-12-01T23:53:56,447 DEBUG [RS:1;579b0f681375:44651 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;579b0f681375:44651 2024-12-01T23:53:56,450 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;579b0f681375:45401 2024-12-01T23:53:56,451 INFO [RS:0;579b0f681375:45401 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T23:53:56,451 INFO [RS:1;579b0f681375:44651 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T23:53:56,451 INFO [RS:0;579b0f681375:45401 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T23:53:56,451 INFO [RS:1;579b0f681375:44651 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T23:53:56,451 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T23:53:56,451 DEBUG [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-01T23:53:56,452 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-01T23:53:56,454 INFO [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(2659): reportForDuty to master=579b0f681375,40611,1733097234530 with port=44651, startcode=1733097235352 2024-12-01T23:53:56,454 INFO [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(2659): reportForDuty to master=579b0f681375,40611,1733097234530 with port=45401, startcode=1733097235193 2024-12-01T23:53:56,465 INFO [master/579b0f681375:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-01T23:53:56,465 DEBUG [RS:0;579b0f681375:45401 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T23:53:56,465 DEBUG [RS:1;579b0f681375:44651 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T23:53:56,473 INFO [master/579b0f681375:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-01T23:53:56,480 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 579b0f681375,40611,1733097234530 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T23:53:56,488 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/579b0f681375:0, corePoolSize=5, maxPoolSize=5 2024-12-01T23:53:56,488 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/579b0f681375:0, corePoolSize=5, maxPoolSize=5 2024-12-01T23:53:56,489 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/579b0f681375:0, corePoolSize=5, maxPoolSize=5 2024-12-01T23:53:56,489 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/579b0f681375:0, corePoolSize=5, maxPoolSize=5 2024-12-01T23:53:56,489 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/579b0f681375:0, corePoolSize=10, maxPoolSize=10 2024-12-01T23:53:56,489 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,489 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/579b0f681375:0, corePoolSize=2, maxPoolSize=2 2024-12-01T23:53:56,489 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,495 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T23:53:56,495 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733097266495 2024-12-01T23:53:56,496 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-01T23:53:56,498 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56919, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T23:53:56,498 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35473, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T23:53:56,498 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T23:53:56,499 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T23:53:56,501 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:56,501 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T23:53:56,502 INFO 
[master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T23:53:56,502 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T23:53:56,503 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T23:53:56,503 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T23:53:56,505 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40611 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-01T23:53:56,504 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,509 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T23:53:56,511 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T23:53:56,511 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T23:53:56,512 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40611 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-01T23:53:56,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741831_1007 (size=1321) 2024-12-01T23:53:56,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741831_1007 (size=1321) 2024-12-01T23:53:56,514 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T23:53:56,514 INFO [master/579b0f681375:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T23:53:56,517 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/579b0f681375:0:becomeActiveMaster-HFileCleaner.large.0-1733097236516,5,FailOnTimeoutGroup] 2024-12-01T23:53:56,517 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-01T23:53:56,517 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/579b0f681375:0:becomeActiveMaster-HFileCleaner.small.0-1733097236517,5,FailOnTimeoutGroup] 2024-12-01T23:53:56,517 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18 2024-12-01T23:53:56,517 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,518 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T23:53:56,519 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,519 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741832_1008 (size=32) 2024-12-01T23:53:56,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741832_1008 (size=32) 2024-12-01T23:53:56,535 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-01T23:53:56,535 DEBUG [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-01T23:53:56,535 WARN [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-01T23:53:56,535 WARN [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-01T23:53:56,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:53:56,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T23:53:56,542 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T23:53:56,542 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:56,543 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:53:56,543 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T23:53:56,545 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T23:53:56,545 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:56,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:53:56,546 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T23:53:56,548 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T23:53:56,548 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:56,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:53:56,549 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T23:53:56,551 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T23:53:56,551 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:56,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:53:56,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T23:53:56,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740 2024-12-01T23:53:56,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740 2024-12-01T23:53:56,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping 
wal replay for 1588230740 2024-12-01T23:53:56,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T23:53:56,558 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T23:53:56,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T23:53:56,566 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:53:56,567 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69012096, jitterRate=0.028360366821289062}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T23:53:56,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733097236536Initializing all the Stores at 1733097236538 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733097236538Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733097236538Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733097236538Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733097236538Cleaning up temporary data from old regions at 1733097236558 (+20 ms)Region opened successfully at 1733097236569 (+11 ms) 2024-12-01T23:53:56,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T23:53:56,569 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T23:53:56,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T23:53:56,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T23:53:56,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-12-01T23:53:56,570 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T23:53:56,571 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733097236569Disabling compacts and flushes for region at 1733097236569Disabling writes for close at 1733097236569Writing region close event to WAL at 1733097236570 (+1 ms)Closed at 1733097236570 2024-12-01T23:53:56,574 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T23:53:56,574 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-01T23:53:56,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T23:53:56,586 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T23:53:56,589 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T23:53:56,636 INFO [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(2659): reportForDuty to master=579b0f681375,40611,1733097234530 with port=44651, startcode=1733097235352 2024-12-01T23:53:56,636 INFO [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(2659): reportForDuty to master=579b0f681375,40611,1733097234530 with port=45401, startcode=1733097235193 2024-12-01T23:53:56,639 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40611 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 579b0f681375,44651,1733097235352 2024-12-01T23:53:56,643 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40611 {}] master.ServerManager(517): Registering regionserver=579b0f681375,44651,1733097235352 2024-12-01T23:53:56,651 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40611 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 579b0f681375,45401,1733097235193 2024-12-01T23:53:56,651 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40611 {}] master.ServerManager(517): Registering regionserver=579b0f681375,45401,1733097235193 2024-12-01T23:53:56,651 DEBUG [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18 2024-12-01T23:53:56,651 DEBUG [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38903 2024-12-01T23:53:56,651 DEBUG [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T23:53:56,653 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18 2024-12-01T23:53:56,653 DEBUG [RS:0;579b0f681375:45401 {}] 
regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38903 2024-12-01T23:53:56,653 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T23:53:56,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T23:53:56,700 DEBUG [RS:1;579b0f681375:44651 {}] zookeeper.ZKUtil(111): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/579b0f681375,44651,1733097235352 2024-12-01T23:53:56,701 WARN [RS:1;579b0f681375:44651 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T23:53:56,701 INFO [RS:1;579b0f681375:44651 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T23:53:56,701 DEBUG [RS:0;579b0f681375:45401 {}] zookeeper.ZKUtil(111): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/579b0f681375,45401,1733097235193 2024-12-01T23:53:56,702 WARN [RS:0;579b0f681375:45401 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T23:53:56,702 DEBUG [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/WALs/579b0f681375,44651,1733097235352 2024-12-01T23:53:56,702 INFO [RS:0;579b0f681375:45401 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T23:53:56,702 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/WALs/579b0f681375,45401,1733097235193 2024-12-01T23:53:56,704 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [579b0f681375,45401,1733097235193] 2024-12-01T23:53:56,704 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [579b0f681375,44651,1733097235352] 2024-12-01T23:53:56,729 INFO [RS:1;579b0f681375:44651 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T23:53:56,729 INFO [RS:0;579b0f681375:45401 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T23:53:56,740 WARN [579b0f681375:40611 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-01T23:53:56,742 INFO [RS:0;579b0f681375:45401 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T23:53:56,742 INFO [RS:1;579b0f681375:44651 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T23:53:56,748 INFO [RS:0;579b0f681375:45401 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T23:53:56,748 INFO [RS:1;579b0f681375:44651 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T23:53:56,748 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,748 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,749 INFO [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T23:53:56,749 INFO [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T23:53:56,754 INFO [RS:0;579b0f681375:45401 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T23:53:56,754 INFO [RS:1;579b0f681375:44651 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T23:53:56,756 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,756 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-01T23:53:56,756 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,756 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,756 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,756 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,756 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,756 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/579b0f681375:0, corePoolSize=2, maxPoolSize=2 2024-12-01T23:53:56,757 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/579b0f681375:0, corePoolSize=2, maxPoolSize=2 2024-12-01T23:53:56,757 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/579b0f681375:0, 
corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,757 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,758 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,758 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/579b0f681375:0, corePoolSize=1, maxPoolSize=1 2024-12-01T23:53:56,758 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:53:56,758 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:53:56,758 DEBUG [RS:0;579b0f681375:45401 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:53:56,758 DEBUG [RS:1;579b0f681375:44651 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/579b0f681375:0, corePoolSize=3, maxPoolSize=3 2024-12-01T23:53:56,759 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,759 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,759 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,759 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,759 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,759 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,759 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,759 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=FileSystemUtilizationChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T23:53:56,759 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,759 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,759 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,44651,1733097235352-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T23:53:56,759 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=FileSystemUtilizationChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,759 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,759 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,45401,1733097235193-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T23:53:56,778 INFO [RS:0;579b0f681375:45401 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T23:53:56,778 INFO [RS:1;579b0f681375:44651 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T23:53:56,779 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,44651,1733097235352-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,779 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,45401,1733097235193-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,780 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,780 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,780 INFO [RS:1;579b0f681375:44651 {}] regionserver.Replication(171): 579b0f681375,44651,1733097235352 started 2024-12-01T23:53:56,780 INFO [RS:0;579b0f681375:45401 {}] regionserver.Replication(171): 579b0f681375,45401,1733097235193 started 2024-12-01T23:53:56,798 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,798 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T23:53:56,798 INFO [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(1482): Serving as 579b0f681375,44651,1733097235352, RpcServer on 579b0f681375/172.17.0.2:44651, sessionid=0x101949ab0000002 2024-12-01T23:53:56,798 INFO [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1482): Serving as 579b0f681375,45401,1733097235193, RpcServer on 579b0f681375/172.17.0.2:45401, sessionid=0x101949ab0000001 2024-12-01T23:53:56,799 DEBUG [RS:1;579b0f681375:44651 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T23:53:56,799 DEBUG [RS:0;579b0f681375:45401 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T23:53:56,799 DEBUG [RS:1;579b0f681375:44651 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 579b0f681375,44651,1733097235352 2024-12-01T23:53:56,799 DEBUG [RS:0;579b0f681375:45401 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 579b0f681375,45401,1733097235193 2024-12-01T23:53:56,799 DEBUG [RS:0;579b0f681375:45401 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,45401,1733097235193' 2024-12-01T23:53:56,799 DEBUG [RS:1;579b0f681375:44651 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,44651,1733097235352' 2024-12-01T23:53:56,799 DEBUG [RS:0;579b0f681375:45401 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T23:53:56,799 DEBUG [RS:1;579b0f681375:44651 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T23:53:56,800 DEBUG [RS:0;579b0f681375:45401 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T23:53:56,800 DEBUG [RS:1;579b0f681375:44651 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T23:53:56,801 DEBUG [RS:0;579b0f681375:45401 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T23:53:56,801 DEBUG [RS:1;579b0f681375:44651 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T23:53:56,801 DEBUG [RS:0;579b0f681375:45401 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T23:53:56,801 DEBUG [RS:1;579b0f681375:44651 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T23:53:56,801 DEBUG [RS:1;579b0f681375:44651 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 579b0f681375,44651,1733097235352 2024-12-01T23:53:56,801 DEBUG [RS:0;579b0f681375:45401 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 579b0f681375,45401,1733097235193 2024-12-01T23:53:56,801 DEBUG [RS:0;579b0f681375:45401 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,45401,1733097235193' 2024-12-01T23:53:56,801 DEBUG [RS:1;579b0f681375:44651 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '579b0f681375,44651,1733097235352' 2024-12-01T23:53:56,801 DEBUG [RS:1;579b0f681375:44651 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T23:53:56,801 DEBUG [RS:0;579b0f681375:45401 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T23:53:56,802 DEBUG [RS:1;579b0f681375:44651 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T23:53:56,802 DEBUG [RS:0;579b0f681375:45401 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T23:53:56,802 DEBUG [RS:1;579b0f681375:44651 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T23:53:56,802 DEBUG [RS:0;579b0f681375:45401 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T23:53:56,802 INFO [RS:1;579b0f681375:44651 {}] quotas.RegionServerRpcQuotaManager(68): Initializing RPC quota support 2024-12-01T23:53:56,802 INFO [RS:0;579b0f681375:45401 {}] quotas.RegionServerRpcQuotaManager(68): Initializing RPC quota support 2024-12-01T23:53:56,805 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaRefresherChore, period=1800000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,805 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaRefresherChore, period=1800000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,806 DEBUG [RS:1;579b0f681375:44651 {}] zookeeper.ZKUtil(347): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Unable to get data of znode /hbase/rpc-throttle because node does not exist (not an error) 2024-12-01T23:53:56,806 DEBUG [RS:0;579b0f681375:45401 {}] zookeeper.ZKUtil(347): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Unable to get data of znode /hbase/rpc-throttle because node does not exist (not an error) 2024-12-01T23:53:56,806 INFO [RS:1;579b0f681375:44651 {}] quotas.RegionServerRpcQuotaManager(74): Start rpc quota manager and rpc throttle enabled is true 2024-12-01T23:53:56,806 INFO [RS:0;579b0f681375:45401 {}] quotas.RegionServerRpcQuotaManager(74): Start rpc quota manager and rpc throttle enabled is true 2024-12-01T23:53:56,806 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=SpaceQuotaRefresherChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,806 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=SpaceQuotaRefresherChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,807 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(168): Chore ScheduledChore name=RegionSizeReportingChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:56,807 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(168): Chore ScheduledChore name=RegionSizeReportingChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-01T23:53:56,912 INFO [RS:0;579b0f681375:45401 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T23:53:56,912 INFO [RS:1;579b0f681375:44651 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T23:53:56,915 INFO [RS:0;579b0f681375:45401 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C45401%2C1733097235193, suffix=, logDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/WALs/579b0f681375,45401,1733097235193, archiveDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/oldWALs, maxLogs=32 2024-12-01T23:53:56,915 INFO [RS:1;579b0f681375:44651 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C44651%2C1733097235352, suffix=, logDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/WALs/579b0f681375,44651,1733097235352, archiveDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/oldWALs, maxLogs=32 2024-12-01T23:53:56,932 DEBUG [RS:0;579b0f681375:45401 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/WALs/579b0f681375,45401,1733097235193/579b0f681375%2C45401%2C1733097235193.1733097236920, exclude list is [], retry=0 2024-12-01T23:53:56,934 DEBUG [RS:1;579b0f681375:44651 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/WALs/579b0f681375,44651,1733097235352/579b0f681375%2C44651%2C1733097235352.1733097236920, exclude list is [], retry=0 2024-12-01T23:53:56,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46129,DS-9d7ebde0-20b7-41fb-8d71-4bf5406ae646,DISK] 2024-12-01T23:53:56,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39935,DS-e911d9dd-51de-49e2-a93a-82136a215d17,DISK] 2024-12-01T23:53:56,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46129,DS-9d7ebde0-20b7-41fb-8d71-4bf5406ae646,DISK] 2024-12-01T23:53:56,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39935,DS-e911d9dd-51de-49e2-a93a-82136a215d17,DISK] 2024-12-01T23:53:56,941 INFO [RS:1;579b0f681375:44651 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/WALs/579b0f681375,44651,1733097235352/579b0f681375%2C44651%2C1733097235352.1733097236920 2024-12-01T23:53:56,941 INFO [RS:0;579b0f681375:45401 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/WALs/579b0f681375,45401,1733097235193/579b0f681375%2C45401%2C1733097235193.1733097236920 
2024-12-01T23:53:56,942 DEBUG [RS:0;579b0f681375:45401 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37593:37593),(127.0.0.1/127.0.0.1:42783:42783)] 2024-12-01T23:53:56,942 DEBUG [RS:1;579b0f681375:44651 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37593:37593),(127.0.0.1/127.0.0.1:42783:42783)] 2024-12-01T23:53:56,993 DEBUG [579b0f681375:40611 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=2, allServersCount=2 2024-12-01T23:53:57,001 DEBUG [579b0f681375:40611 {}] balancer.BalancerClusterState(204): Hosts are {579b0f681375=0} racks are {/default-rack=0} 2024-12-01T23:53:57,009 DEBUG [579b0f681375:40611 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T23:53:57,009 DEBUG [579b0f681375:40611 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T23:53:57,009 DEBUG [579b0f681375:40611 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T23:53:57,009 DEBUG [579b0f681375:40611 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T23:53:57,009 INFO [579b0f681375:40611 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T23:53:57,009 INFO [579b0f681375:40611 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T23:53:57,010 DEBUG [579b0f681375:40611 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T23:53:57,019 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:53:57,025 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 579b0f681375,45401,1733097235193, state=OPENING 2024-12-01T23:53:57,059 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T23:53:57,070 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:57,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:57,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:53:57,071 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:53:57,071 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:53:57,071 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:53:57,073 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, 
region=1588230740, ASSIGN 2024-12-01T23:53:57,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=579b0f681375,45401,1733097235193}] 2024-12-01T23:53:57,252 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T23:53:57,255 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38695, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T23:53:57,268 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-01T23:53:57,268 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T23:53:57,269 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-01T23:53:57,272 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=579b0f681375%2C45401%2C1733097235193.meta, suffix=.meta, logDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/WALs/579b0f681375,45401,1733097235193, archiveDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/oldWALs, maxLogs=32 2024-12-01T23:53:57,286 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/WALs/579b0f681375,45401,1733097235193/579b0f681375%2C45401%2C1733097235193.meta.1733097237274.meta, exclude list is [], retry=0 2024-12-01T23:53:57,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46129,DS-9d7ebde0-20b7-41fb-8d71-4bf5406ae646,DISK] 2024-12-01T23:53:57,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39935,DS-e911d9dd-51de-49e2-a93a-82136a215d17,DISK] 2024-12-01T23:53:57,294 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/WALs/579b0f681375,45401,1733097235193/579b0f681375%2C45401%2C1733097235193.meta.1733097237274.meta 2024-12-01T23:53:57,294 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37593:37593),(127.0.0.1/127.0.0.1:42783:42783)] 2024-12-01T23:53:57,294 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T23:53:57,296 DEBUG 
[RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T23:53:57,298 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T23:53:57,300 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-01T23:53:57,303 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T23:53:57,304 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:53:57,304 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-01T23:53:57,304 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-01T23:53:57,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T23:53:57,309 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T23:53:57,310 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:57,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:53:57,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T23:53:57,313 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T23:53:57,313 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:57,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:53:57,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T23:53:57,316 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T23:53:57,316 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:57,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:53:57,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T23:53:57,318 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered 
window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T23:53:57,319 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:57,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T23:53:57,320 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T23:53:57,322 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740 2024-12-01T23:53:57,324 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740 2024-12-01T23:53:57,327 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T23:53:57,327 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T23:53:57,328 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-01T23:53:57,330 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T23:53:57,332 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71389454, jitterRate=0.06378576159477234}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T23:53:57,332 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-01T23:53:57,333 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733097237305Writing region info on filesystem at 1733097237305Initializing all the Stores at 1733097237307 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733097237307Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733097237307Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733097237307Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733097237307Cleaning up temporary data from old regions at 1733097237327 (+20 ms)Running coprocessor post-open hooks at 1733097237332 (+5 ms)Region opened successfully at 1733097237333 (+1 ms) 2024-12-01T23:53:57,339 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733097237244 2024-12-01T23:53:57,349 DEBUG [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T23:53:57,350 INFO [RS_OPEN_META-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-01T23:53:57,352 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:53:57,354 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 579b0f681375,45401,1733097235193, state=OPEN 2024-12-01T23:53:57,387 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T23:53:57,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T23:53:57,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T23:53:57,387 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:53:57,387 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:53:57,387 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T23:53:57,387 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=579b0f681375,45401,1733097235193 2024-12-01T23:53:57,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T23:53:57,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=579b0f681375,45401,1733097235193 in 312 msec 2024-12-01T23:53:57,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T23:53:57,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 816 msec 2024-12-01T23:53:57,404 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T23:53:57,404 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-01T23:53:57,424 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T23:53:57,426 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=579b0f681375,45401,1733097235193, seqNum=-1] 2024-12-01T23:53:57,446 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T23:53:57,448 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42045, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-01T23:53:57,469 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0600 sec 2024-12-01T23:53:57,469 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733097237469, completionTime=-1 2024-12-01T23:53:57,471 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=2; waited=0ms, expected min=2 server(s), max=2 server(s), master is running 2024-12-01T23:53:57,472 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-01T23:53:57,499 INFO [master/579b0f681375:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=2 2024-12-01T23:53:57,499 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733097297499 2024-12-01T23:53:57,499 INFO [master/579b0f681375:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733097357499 2024-12-01T23:53:57,499 INFO [master/579b0f681375:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 27 msec 2024-12-01T23:53:57,500 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-01T23:53:57,507 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,40611,1733097234530-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:57,507 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,40611,1733097234530-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:57,507 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,40611,1733097234530-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:57,509 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-579b0f681375:40611, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:57,509 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:57,510 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:57,516 DEBUG [master/579b0f681375:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-01T23:53:57,536 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.065sec 2024-12-01T23:53:57,537 INFO [master/579b0f681375:0:becomeActiveMaster {}] quotas.MasterQuotaManager(103): Quota table not found. Creating... 
2024-12-01T23:53:57,539 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(2490): Client=null/null create 'hbase:quota', {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T23:53:57,545 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:quota 2024-12-01T23:53:57,546 INFO [master/579b0f681375:0:becomeActiveMaster {}] quotas.MasterQuotaManager(107): Initializing quota support 2024-12-01T23:53:57,547 INFO [master/579b0f681375:0:becomeActiveMaster {}] namespace.NamespaceStateManager(59): Namespace State Manager started. 2024-12-01T23:53:57,548 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T23:53:57,549 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:57,552 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T23:53:57,561 INFO [master/579b0f681375:0:becomeActiveMaster {}] namespace.NamespaceStateManager(222): Finished updating state of 2 namespaces. 2024-12-01T23:53:57,561 INFO [master/579b0f681375:0:becomeActiveMaster {}] namespace.NamespaceAuditor(50): NamespaceAuditor started. 2024-12-01T23:53:57,563 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaObserverChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:57,563 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=QuotaObserverChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T23:53:57,564 INFO [master/579b0f681375:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T23:53:57,565 INFO [master/579b0f681375:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T23:53:57,566 INFO [master/579b0f681375:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-01T23:53:57,566 INFO [master/579b0f681375:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T23:53:57,567 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,40611,1733097234530-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T23:53:57,568 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,40611,1733097234530-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T23:53:57,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741836_1012 (size=624) 2024-12-01T23:53:57,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741836_1012 (size=624) 2024-12-01T23:53:57,573 DEBUG [master/579b0f681375:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-01T23:53:57,573 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 67f67293b31d223687049a16b9d906fa, NAME => 'hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:quota', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18 2024-12-01T23:53:57,574 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T23:53:57,574 INFO [master/579b0f681375:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=579b0f681375,40611,1733097234530-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T23:53:57,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741837_1013 (size=38) 2024-12-01T23:53:57,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741837_1013 (size=38) 2024-12-01T23:53:57,588 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:53:57,588 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1722): Closing 67f67293b31d223687049a16b9d906fa, disabling compactions & flushes 2024-12-01T23:53:57,588 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. 2024-12-01T23:53:57,588 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. 2024-12-01T23:53:57,588 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. after waiting 0 ms 2024-12-01T23:53:57,588 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. 2024-12-01T23:53:57,588 INFO [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1973): Closed hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. 2024-12-01T23:53:57,588 DEBUG [RegionOpenAndInit-hbase:quota-pool-0 {}] regionserver.HRegion(1676): Region close journal for 67f67293b31d223687049a16b9d906fa: Waiting for close lock at 1733097237588Disabling compacts and flushes for region at 1733097237588Disabling writes for close at 1733097237588Writing region close event to WAL at 1733097237588Closed at 1733097237588 2024-12-01T23:53:57,591 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T23:53:57,596 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa.","families":{"info":[{"qualifier":"regioninfo","vlen":37,"tag":[],"timestamp":"1733097237592"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733097237592"}]},"ts":"1733097237592"} 2024-12-01T23:53:57,602 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-01T23:53:57,602 ERROR [Time-limited test {}] master.TableStateManager(82): Unable to get table hbase:quota state org.apache.hadoop.hbase.TableNotFoundException: No state found for hbase:quota at org.apache.hadoop.hbase.master.TableStateManager.getTableState(TableStateManager.java:141) ~[classes/:?] at org.apache.hadoop.hbase.master.TableStateManager.isTableState(TableStateManager.java:79) ~[classes/:?] at org.apache.hadoop.hbase.master.assignment.AssignmentManager.isTableDisabled(AssignmentManager.java:549) ~[classes/:?] at org.apache.hadoop.hbase.master.assignment.AssignmentManager.getRegionStatesCount(AssignmentManager.java:2637) ~[classes/:?] 
    at org.apache.hadoop.hbase.master.HMaster.getClusterMetricsWithoutCoprocessor(HMaster.java:3077) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.getClusterMetrics(HMaster.java:3132) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.getClusterMetrics(HMaster.java:3125) ~[classes/:?]
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.getClusterMetrics(SingleProcessHBaseCluster.java:687) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.<init>(SingleProcessHBaseCluster.java:113) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.startMiniHBaseCluster(HBaseTestingUtil.java:863) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.startMiniCluster(HBaseTestingUtil.java:830) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.startMiniCluster(HBaseTestingUtil.java:784) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.setUpBeforeClass(TestClusterScopeQuotaThrottle.java:78) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.invokeMethod(RunBefores.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
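Annotation: the TableNotFoundException above is the cluster-status call racing the still-running CreateTableProcedure for hbase:quota; the table's state row only reaches hbase:meta at 23:53:57,611 (state=ENABLING) and the minicluster still reports up at 23:53:57,751. The trace also shows where the cluster comes from: TestClusterScopeQuotaThrottle.setUpBeforeClass calls HBaseTestingUtil.startMiniCluster. A minimal JUnit 4 skeleton of that pattern (class name is invented; the region-server count of 2 matches the log, everything else is illustrative):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterSketch {
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        // Boots HDFS, ZooKeeper, one master and two region servers,
        // producing startup output like the entries above.
        TEST_UTIL.startMiniCluster(2);
      }

      @AfterClass
      public static void tearDownAfterClass() throws Exception {
        TEST_UTIL.shutdownMiniCluster();
      }
    }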
2024-12-01T23:53:57,604 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T23:53:57,606 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:quota","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097237604"}]},"ts":"1733097237604"} 2024-12-01T23:53:57,611 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:quota, state=ENABLING in hbase:meta 2024-12-01T23:53:57,611 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {579b0f681375=0} racks are {/default-rack=0} 2024-12-01T23:53:57,613 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64a55c88, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T23:53:57,613 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T23:53:57,614 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T23:53:57,614 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T23:53:57,614 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T23:53:57,614 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T23:53:57,614 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T23:53:57,614 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T23:53:57,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=67f67293b31d223687049a16b9d906fa, ASSIGN}] 2024-12-01T23:53:57,617 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-01T23:53:57,617 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-01T23:53:57,619 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=67f67293b31d223687049a16b9d906fa, ASSIGN 2024-12-01T23:53:57,621 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:quota, region=67f67293b31d223687049a16b9d906fa, ASSIGN; state=OFFLINE, location=579b0f681375,44651,1733097235352; forceNewPlan=false, retain=false 2024-12-01T23:53:57,657 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 579b0f681375,40611,-1 for getting cluster id 2024-12-01T23:53:57,661 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T23:53:57,689 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9b366596-4c21-469d-a085-5ab2e4711c75' 2024-12-01T23:53:57,693 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] 
ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T23:53:57,694 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9b366596-4c21-469d-a085-5ab2e4711c75" 2024-12-01T23:53:57,697 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d5d3bec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T23:53:57,697 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [579b0f681375,40611,-1] 2024-12-01T23:53:57,700 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T23:53:57,703 DEBUG [RPCClient-NioEventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:53:57,710 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36342, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T23:53:57,713 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74165b90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T23:53:57,714 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T23:53:57,727 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=579b0f681375,45401,1733097235193, seqNum=-1] 2024-12-01T23:53:57,727 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T23:53:57,731 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45098, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T23:53:57,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=579b0f681375,40611,1733097234530 2024-12-01T23:53:57,752 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:53:57,774 INFO [579b0f681375:40611 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
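Annotation: the block above is the test client bootstrapping its connection: it asks the connection registry for the cluster id (9b366596-...), resolves the hbase:meta location on 579b0f681375,45401, and then declares the minicluster up. In client code all of that is implicit in creating a connection; a small sketch, assuming conf is the same cluster configuration used above:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    // Creating the connection triggers the registry/cluster-id/meta lookups logged above.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("hbase:quota"))) {
      // reads and writes now route through the cached region locations
    }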
2024-12-01T23:53:57,775 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=67f67293b31d223687049a16b9d906fa, regionState=OPENING, regionLocation=579b0f681375,44651,1733097235352 2024-12-01T23:53:57,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:quota, region=67f67293b31d223687049a16b9d906fa, ASSIGN because future has completed 2024-12-01T23:53:57,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 67f67293b31d223687049a16b9d906fa, server=579b0f681375,44651,1733097235352}] 2024-12-01T23:53:57,938 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T23:53:57,941 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45697, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T23:53:57,948 INFO [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. 2024-12-01T23:53:57,949 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 67f67293b31d223687049a16b9d906fa, NAME => 'hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa.', STARTKEY => '', ENDKEY => ''} 2024-12-01T23:53:57,949 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table quota 67f67293b31d223687049a16b9d906fa 2024-12-01T23:53:57,949 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:53:57,949 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 67f67293b31d223687049a16b9d906fa 2024-12-01T23:53:57,950 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 67f67293b31d223687049a16b9d906fa 2024-12-01T23:53:57,952 INFO [StoreOpener-67f67293b31d223687049a16b9d906fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family q of region 67f67293b31d223687049a16b9d906fa 2024-12-01T23:53:57,955 INFO [StoreOpener-67f67293b31d223687049a16b9d906fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 67f67293b31d223687049a16b9d906fa columnFamilyName q 2024-12-01T23:53:57,955 DEBUG [StoreOpener-67f67293b31d223687049a16b9d906fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:57,956 INFO [StoreOpener-67f67293b31d223687049a16b9d906fa-1 {}] regionserver.HStore(327): Store=67f67293b31d223687049a16b9d906fa/q, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:53:57,956 INFO [StoreOpener-67f67293b31d223687049a16b9d906fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family u of region 67f67293b31d223687049a16b9d906fa 2024-12-01T23:53:57,958 INFO [StoreOpener-67f67293b31d223687049a16b9d906fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 67f67293b31d223687049a16b9d906fa columnFamilyName u 2024-12-01T23:53:57,959 DEBUG [StoreOpener-67f67293b31d223687049a16b9d906fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:57,959 INFO [StoreOpener-67f67293b31d223687049a16b9d906fa-1 {}] regionserver.HStore(327): Store=67f67293b31d223687049a16b9d906fa/u, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:53:57,960 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 67f67293b31d223687049a16b9d906fa 2024-12-01T23:53:57,962 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa 2024-12-01T23:53:57,962 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa 2024-12-01T23:53:57,963 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 67f67293b31d223687049a16b9d906fa 2024-12-01T23:53:57,963 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 67f67293b31d223687049a16b9d906fa 2024-12-01T23:53:57,964 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:quota descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-12-01T23:53:57,966 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 67f67293b31d223687049a16b9d906fa 2024-12-01T23:53:57,970 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:53:57,971 INFO [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 67f67293b31d223687049a16b9d906fa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73993259, jitterRate=0.10258547961711884}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-12-01T23:53:57,971 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 67f67293b31d223687049a16b9d906fa 2024-12-01T23:53:57,972 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 67f67293b31d223687049a16b9d906fa: Running coprocessor pre-open hook at 1733097237950Writing region info on filesystem at 1733097237950Initializing all the Stores at 1733097237952 (+2 ms)Instantiating store for column family {NAME => 'q', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733097237952Instantiating store for column family {NAME => 'u', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733097237952Cleaning up temporary data from old regions at 1733097237963 (+11 ms)Running coprocessor post-open hooks at 1733097237971 (+8 ms)Region opened successfully at 1733097237972 (+1 ms) 2024-12-01T23:53:57,974 INFO [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa., pid=6, 
masterSystemTime=1733097237938 2024-12-01T23:53:57,977 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. 2024-12-01T23:53:57,977 INFO [RS_OPEN_PRIORITY_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. 2024-12-01T23:53:57,978 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=67f67293b31d223687049a16b9d906fa, regionState=OPEN, openSeqNum=2, regionLocation=579b0f681375,44651,1733097235352 2024-12-01T23:53:57,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 67f67293b31d223687049a16b9d906fa, server=579b0f681375,44651,1733097235352 because future has completed 2024-12-01T23:53:57,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T23:53:57,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 67f67293b31d223687049a16b9d906fa, server=579b0f681375,44651,1733097235352 in 201 msec 2024-12-01T23:53:57,994 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T23:53:57,994 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:quota, region=67f67293b31d223687049a16b9d906fa, ASSIGN in 372 msec 2024-12-01T23:53:57,996 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T23:53:57,996 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:quota","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097237996"}]},"ts":"1733097237996"} 2024-12-01T23:53:57,999 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:quota, state=ENABLED in hbase:meta 2024-12-01T23:53:58,001 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:quota execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T23:53:58,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:quota in 462 msec 2024-12-01T23:53:58,072 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=hbase:quota,, stopping at row=hbase:quota ,, for max=2147483647 with caching=100 2024-12-01T23:53:58,080 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-01T23:53:58,084 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 579b0f681375,40611,1733097234530 2024-12-01T23:53:58,087 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4dae6b9d 
2024-12-01T23:53:58,091 DEBUG [RPCClient-NioEventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T23:53:58,093 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36358, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T23:53:58,096 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=hbase:quota,, stopping at row=hbase:quota ,, for max=2147483647 with caching=100 2024-12-01T23:53:58,103 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T23:53:58,106 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50794, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T23:53:58,118 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin0', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T23:53:58,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin0 2024-12-01T23:53:58,124 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T23:53:58,126 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:58,126 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin0" procId is: 7 2024-12-01T23:53:58,128 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T23:53:58,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T23:53:58,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741838_1014 (size=391) 2024-12-01T23:53:58,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741838_1014 (size=391) 2024-12-01T23:53:58,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T23:53:58,544 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3db8e2be75873790be5eaad9cf5be7a6, NAME => 
'TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin0', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18 2024-12-01T23:53:58,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741839_1015 (size=50) 2024-12-01T23:53:58,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741839_1015 (size=50) 2024-12-01T23:53:58,555 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:53:58,555 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1722): Closing 3db8e2be75873790be5eaad9cf5be7a6, disabling compactions & flushes 2024-12-01T23:53:58,555 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. 2024-12-01T23:53:58,555 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. 2024-12-01T23:53:58,555 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. after waiting 0 ms 2024-12-01T23:53:58,555 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. 2024-12-01T23:53:58,555 INFO [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. 2024-12-01T23:53:58,556 DEBUG [RegionOpenAndInit-TestQuotaAdmin0-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3db8e2be75873790be5eaad9cf5be7a6: Waiting for close lock at 1733097238555Disabling compacts and flushes for region at 1733097238555Disabling writes for close at 1733097238555Writing region close event to WAL at 1733097238555Closed at 1733097238555 2024-12-01T23:53:58,558 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T23:53:58,558 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1733097238558"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733097238558"}]},"ts":"1733097238558"} 2024-12-01T23:53:58,561 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
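Annotation: the request logged at 23:53:58,118 ("Client=jenkins//172.17.0.2 create 'TestQuotaAdmin0' ...") is the client side of the CreateTableProcedure traced above. A hedged sketch of the equivalent Admin call, using the schema shown in the log (one 'cf' family, one version, region replication 1); the Connection handle conn is assumed to exist:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestQuotaAdmin0"))
        .setRegionReplication(1)
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)
            .build())
        .build();
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(desc); // drives a CreateTableProcedure on the master, like pid=7 above
    }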
2024-12-01T23:53:58,562 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T23:53:58,563 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097238563"}]},"ts":"1733097238563"} 2024-12-01T23:53:58,566 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=ENABLING in hbase:meta 2024-12-01T23:53:58,566 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {579b0f681375=0} racks are {/default-rack=0} 2024-12-01T23:53:58,567 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T23:53:58,567 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T23:53:58,567 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T23:53:58,567 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T23:53:58,567 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T23:53:58,567 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T23:53:58,567 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T23:53:58,568 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=3db8e2be75873790be5eaad9cf5be7a6, ASSIGN}] 2024-12-01T23:53:58,569 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=3db8e2be75873790be5eaad9cf5be7a6, ASSIGN 2024-12-01T23:53:58,570 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=3db8e2be75873790be5eaad9cf5be7a6, ASSIGN; state=OFFLINE, location=579b0f681375,45401,1733097235193; forceNewPlan=false, retain=false 2024-12-01T23:53:58,721 INFO [579b0f681375:40611 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-01T23:53:58,722 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=3db8e2be75873790be5eaad9cf5be7a6, regionState=OPENING, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:53:58,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=3db8e2be75873790be5eaad9cf5be7a6, ASSIGN because future has completed 2024-12-01T23:53:58,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3db8e2be75873790be5eaad9cf5be7a6, server=579b0f681375,45401,1733097235193}] 2024-12-01T23:53:58,894 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. 2024-12-01T23:53:58,895 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 3db8e2be75873790be5eaad9cf5be7a6, NAME => 'TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6.', STARTKEY => '', ENDKEY => ''} 2024-12-01T23:53:58,895 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin0 3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:53:58,895 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:53:58,895 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:53:58,896 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:53:58,899 INFO [StoreOpener-3db8e2be75873790be5eaad9cf5be7a6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:53:58,902 INFO [StoreOpener-3db8e2be75873790be5eaad9cf5be7a6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3db8e2be75873790be5eaad9cf5be7a6 columnFamilyName cf 2024-12-01T23:53:58,902 DEBUG 
[StoreOpener-3db8e2be75873790be5eaad9cf5be7a6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:58,903 INFO [StoreOpener-3db8e2be75873790be5eaad9cf5be7a6-1 {}] regionserver.HStore(327): Store=3db8e2be75873790be5eaad9cf5be7a6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:53:58,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T23:53:58,903 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:53:58,904 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:53:58,905 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:53:58,906 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:53:58,906 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:53:58,908 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:53:58,912 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:53:58,913 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 3db8e2be75873790be5eaad9cf5be7a6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58936648, jitterRate=-0.12177550792694092}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T23:53:58,913 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:53:58,913 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 3db8e2be75873790be5eaad9cf5be7a6: Running coprocessor pre-open hook at 1733097238896Writing region info on filesystem at 1733097238896Initializing all the Stores at 1733097238898 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733097238898Cleaning up temporary data from old regions at 1733097238906 (+8 ms)Running coprocessor post-open hooks at 1733097238913 (+7 ms)Region opened successfully at 1733097238913 2024-12-01T23:53:58,915 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., pid=9, masterSystemTime=1733097238887 2024-12-01T23:53:58,918 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. 2024-12-01T23:53:58,918 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. 2024-12-01T23:53:58,918 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=3db8e2be75873790be5eaad9cf5be7a6, regionState=OPEN, openSeqNum=2, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:53:58,921 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3db8e2be75873790be5eaad9cf5be7a6, server=579b0f681375,45401,1733097235193 because future has completed 2024-12-01T23:53:58,926 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-01T23:53:58,926 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 3db8e2be75873790be5eaad9cf5be7a6, server=579b0f681375,45401,1733097235193 in 193 msec 2024-12-01T23:53:58,930 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-01T23:53:58,930 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=3db8e2be75873790be5eaad9cf5be7a6, ASSIGN in 358 msec 2024-12-01T23:53:58,931 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T23:53:58,931 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097238931"}]},"ts":"1733097238931"} 2024-12-01T23:53:58,933 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=ENABLED in hbase:meta 2024-12-01T23:53:58,935 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin0 execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T23:53:58,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin0 in 816 msec 2024-12-01T23:53:59,664 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T23:53:59,667 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin0 completed 2024-12-01T23:53:59,667 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin0 get assigned. Timeout = 60000ms 2024-12-01T23:53:59,669 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:53:59,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin0 assigned to meta. Checking AM states. 2024-12-01T23:53:59,679 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:53:59,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin0 assigned. 2024-12-01T23:53:59,679 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:53:59,682 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin0,, stopping at row=TestQuotaAdmin0 ,, for max=2147483647 with caching=100 2024-12-01T23:53:59,688 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin0,, stopping at row=TestQuotaAdmin0 ,, for max=2147483647 with caching=100 2024-12-01T23:53:59,716 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin1', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T23:53:59,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin1 2024-12-01T23:53:59,719 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T23:53:59,719 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:53:59,720 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin1" procId is: 10 2024-12-01T23:53:59,721 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T23:53:59,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-12-01T23:53:59,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46129 is added to blk_1073741840_1016 (size=391) 2024-12-01T23:53:59,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741840_1016 (size=391) 2024-12-01T23:53:59,734 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6bde2c94473bacf7f0b53f286324955b, NAME => 'TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin1', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18 2024-12-01T23:53:59,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741841_1017 (size=50) 2024-12-01T23:53:59,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741841_1017 (size=50) 2024-12-01T23:53:59,744 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:53:59,744 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1722): Closing 6bde2c94473bacf7f0b53f286324955b, disabling compactions & flushes 2024-12-01T23:53:59,744 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. 2024-12-01T23:53:59,744 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. 2024-12-01T23:53:59,744 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. after waiting 0 ms 2024-12-01T23:53:59,744 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. 2024-12-01T23:53:59,744 INFO [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. 
2024-12-01T23:53:59,744 DEBUG [RegionOpenAndInit-TestQuotaAdmin1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6bde2c94473bacf7f0b53f286324955b: Waiting for close lock at 1733097239744Disabling compacts and flushes for region at 1733097239744Disabling writes for close at 1733097239744Writing region close event to WAL at 1733097239744Closed at 1733097239744 2024-12-01T23:53:59,746 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T23:53:59,747 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1733097239746"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733097239746"}]},"ts":"1733097239746"} 2024-12-01T23:53:59,749 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-01T23:53:59,751 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T23:53:59,751 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097239751"}]},"ts":"1733097239751"} 2024-12-01T23:53:59,754 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=ENABLING in hbase:meta 2024-12-01T23:53:59,754 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {579b0f681375=0} racks are {/default-rack=0} 2024-12-01T23:53:59,756 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T23:53:59,756 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T23:53:59,756 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T23:53:59,756 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T23:53:59,756 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T23:53:59,756 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T23:53:59,756 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T23:53:59,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=6bde2c94473bacf7f0b53f286324955b, ASSIGN}] 2024-12-01T23:53:59,758 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=6bde2c94473bacf7f0b53f286324955b, ASSIGN 2024-12-01T23:53:59,759 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=6bde2c94473bacf7f0b53f286324955b, ASSIGN; state=OFFLINE, location=579b0f681375,45401,1733097235193; forceNewPlan=false, retain=false 
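Annotation: TestQuotaAdmin1 follows the same create/assign path as TestQuotaAdmin0 above; these tables exist so the test can attach throttle quotas to them. A minimal sketch of setting such a throttle (the limit and interval are placeholders, not values from this run; admin is an existing Admin handle):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.quotas.QuotaSettings;
    import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
    import org.apache.hadoop.hbase.quotas.ThrottleType;

    // Attach a request-number throttle to TestQuotaAdmin0; the setting is persisted
    // in the hbase:quota table created earlier in this log.
    QuotaSettings throttle = QuotaSettingsFactory.throttleTable(
        TableName.valueOf("TestQuotaAdmin0"), ThrottleType.REQUEST_NUMBER, 10, TimeUnit.MINUTES);
    admin.setQuota(throttle);

Versions that support cluster-scope throttles, which is what this test exercises, also expose QuotaSettingsFactory overloads taking a QuotaScope argument; the call above uses the machine-scope default.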
2024-12-01T23:53:59,910 INFO [579b0f681375:40611 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-01T23:53:59,910 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=6bde2c94473bacf7f0b53f286324955b, regionState=OPENING, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:53:59,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=6bde2c94473bacf7f0b53f286324955b, ASSIGN because future has completed 2024-12-01T23:53:59,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6bde2c94473bacf7f0b53f286324955b, server=579b0f681375,45401,1733097235193}] 2024-12-01T23:53:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-12-01T23:54:00,079 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. 2024-12-01T23:54:00,079 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 6bde2c94473bacf7f0b53f286324955b, NAME => 'TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b.', STARTKEY => '', ENDKEY => ''} 2024-12-01T23:54:00,080 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin1 6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:54:00,080 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:54:00,080 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:54:00,080 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:54:00,082 INFO [StoreOpener-6bde2c94473bacf7f0b53f286324955b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:54:00,085 INFO [StoreOpener-6bde2c94473bacf7f0b53f286324955b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6bde2c94473bacf7f0b53f286324955b columnFamilyName cf 2024-12-01T23:54:00,086 DEBUG [StoreOpener-6bde2c94473bacf7f0b53f286324955b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:54:00,086 INFO [StoreOpener-6bde2c94473bacf7f0b53f286324955b-1 {}] regionserver.HStore(327): Store=6bde2c94473bacf7f0b53f286324955b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:54:00,086 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:54:00,088 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin1/6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:54:00,088 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin1/6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:54:00,089 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:54:00,089 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:54:00,092 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:54:00,096 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin1/6bde2c94473bacf7f0b53f286324955b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:54:00,097 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 6bde2c94473bacf7f0b53f286324955b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58959759, jitterRate=-0.1214311271905899}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T23:54:00,097 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:54:00,098 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 6bde2c94473bacf7f0b53f286324955b: Running coprocessor pre-open hook at 1733097240080Writing region info on filesystem at 
1733097240080Initializing all the Stores at 1733097240082 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733097240082Cleaning up temporary data from old regions at 1733097240089 (+7 ms)Running coprocessor post-open hooks at 1733097240097 (+8 ms)Region opened successfully at 1733097240098 (+1 ms) 2024-12-01T23:54:00,099 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b., pid=12, masterSystemTime=1733097240068 2024-12-01T23:54:00,102 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. 2024-12-01T23:54:00,102 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. 2024-12-01T23:54:00,104 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=6bde2c94473bacf7f0b53f286324955b, regionState=OPEN, openSeqNum=2, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:54:00,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6bde2c94473bacf7f0b53f286324955b, server=579b0f681375,45401,1733097235193 because future has completed 2024-12-01T23:54:00,112 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-01T23:54:00,114 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 6bde2c94473bacf7f0b53f286324955b, server=579b0f681375,45401,1733097235193 in 195 msec 2024-12-01T23:54:00,116 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-12-01T23:54:00,116 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=6bde2c94473bacf7f0b53f286324955b, ASSIGN in 356 msec 2024-12-01T23:54:00,117 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T23:54:00,117 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097240117"}]},"ts":"1733097240117"} 2024-12-01T23:54:00,120 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=ENABLED in hbase:meta 2024-12-01T23:54:00,121 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=10, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T23:54:00,124 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin1 in 405 msec 2024-12-01T23:54:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=10 2024-12-01T23:54:00,494 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin1 completed 2024-12-01T23:54:00,494 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin1 get assigned. Timeout = 60000ms 2024-12-01T23:54:00,495 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:00,503 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin1 assigned to meta. Checking AM states. 2024-12-01T23:54:00,503 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:00,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin1 assigned. 2024-12-01T23:54:00,504 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:00,506 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin1,, stopping at row=TestQuotaAdmin1 ,, for max=2147483647 with caching=100 2024-12-01T23:54:00,510 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin1,, stopping at row=TestQuotaAdmin1 ,, for max=2147483647 with caching=100 2024-12-01T23:54:00,516 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestQuotaAdmin2', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T23:54:00,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestQuotaAdmin2 2024-12-01T23:54:00,519 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T23:54:00,519 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:54:00,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestQuotaAdmin2" procId is: 13 2024-12-01T23:54:00,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-01T23:54:00,521 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestQuotaAdmin2 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T23:54:00,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741842_1018 (size=391) 2024-12-01T23:54:00,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741842_1018 (size=391) 2024-12-01T23:54:00,533 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1e69caa932f6b592705f1016b011510c, NAME => 'TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestQuotaAdmin2', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18 2024-12-01T23:54:00,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741843_1019 (size=50) 2024-12-01T23:54:00,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741843_1019 (size=50) 2024-12-01T23:54:00,542 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(898): Instantiated TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:54:00,542 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1722): Closing 1e69caa932f6b592705f1016b011510c, disabling compactions & flushes 2024-12-01T23:54:00,542 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1755): Closing region TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. 2024-12-01T23:54:00,542 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. 2024-12-01T23:54:00,542 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. after waiting 0 ms 2024-12-01T23:54:00,542 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. 2024-12-01T23:54:00,542 INFO [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1973): Closed TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. 
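The "Waiting until all regions of table TestQuotaAdmin1 get assigned" entries logged earlier come from the test harness polling assignment state before proceeding. A rough sketch of that kind of check with the testing utility is below; the helper class and the assumption that a mini-cluster is already running are illustrative, only the utility method matches the log source (HBaseTestingUtil).

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    // Illustrative test-side check: block until every region of the freshly
    // created table is assigned and visible in hbase:meta.
    public class WaitForAssignmentExample {
      static void waitForTable(HBaseTestingUtil testUtil) throws Exception {
        // Polls assignment state until all regions of the table are open
        // on some region server (subject to the utility's timeout).
        testUtil.waitUntilAllRegionsAssigned(TableName.valueOf("TestQuotaAdmin1"));
      }
    }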
2024-12-01T23:54:00,542 DEBUG [RegionOpenAndInit-TestQuotaAdmin2-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1e69caa932f6b592705f1016b011510c: Waiting for close lock at 1733097240542Disabling compacts and flushes for region at 1733097240542Disabling writes for close at 1733097240542Writing region close event to WAL at 1733097240542Closed at 1733097240542 2024-12-01T23:54:00,544 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T23:54:00,545 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c.","families":{"info":[{"qualifier":"regioninfo","vlen":49,"tag":[],"timestamp":"1733097240544"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733097240544"}]},"ts":"1733097240544"} 2024-12-01T23:54:00,547 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-01T23:54:00,549 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T23:54:00,549 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097240549"}]},"ts":"1733097240549"} 2024-12-01T23:54:00,551 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=ENABLING in hbase:meta 2024-12-01T23:54:00,552 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {579b0f681375=0} racks are {/default-rack=0} 2024-12-01T23:54:00,553 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T23:54:00,553 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T23:54:00,553 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T23:54:00,553 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T23:54:00,553 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T23:54:00,553 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T23:54:00,553 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T23:54:00,553 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=1e69caa932f6b592705f1016b011510c, ASSIGN}] 2024-12-01T23:54:00,555 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=1e69caa932f6b592705f1016b011510c, ASSIGN 2024-12-01T23:54:00,556 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=1e69caa932f6b592705f1016b011510c, ASSIGN; state=OFFLINE, location=579b0f681375,45401,1733097235193; forceNewPlan=false, retain=false 
2024-12-01T23:54:00,706 INFO [579b0f681375:40611 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-01T23:54:00,707 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=14 updating hbase:meta row=1e69caa932f6b592705f1016b011510c, regionState=OPENING, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:54:00,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=1e69caa932f6b592705f1016b011510c, ASSIGN because future has completed 2024-12-01T23:54:00,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1e69caa932f6b592705f1016b011510c, server=579b0f681375,45401,1733097235193}] 2024-12-01T23:54:00,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-01T23:54:00,873 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(132): Open TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. 2024-12-01T23:54:00,873 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7752): Opening region: {ENCODED => 1e69caa932f6b592705f1016b011510c, NAME => 'TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c.', STARTKEY => '', ENDKEY => ''} 2024-12-01T23:54:00,873 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestQuotaAdmin2 1e69caa932f6b592705f1016b011510c 2024-12-01T23:54:00,873 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(898): Instantiated TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:54:00,874 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7794): checking encryption for 1e69caa932f6b592705f1016b011510c 2024-12-01T23:54:00,874 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7797): checking classloading for 1e69caa932f6b592705f1016b011510c 2024-12-01T23:54:00,877 INFO [StoreOpener-1e69caa932f6b592705f1016b011510c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1e69caa932f6b592705f1016b011510c 2024-12-01T23:54:00,879 INFO [StoreOpener-1e69caa932f6b592705f1016b011510c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e69caa932f6b592705f1016b011510c columnFamilyName cf 2024-12-01T23:54:00,880 DEBUG [StoreOpener-1e69caa932f6b592705f1016b011510c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:54:00,880 INFO [StoreOpener-1e69caa932f6b592705f1016b011510c-1 {}] regionserver.HStore(327): Store=1e69caa932f6b592705f1016b011510c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:54:00,880 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1038): replaying wal for 1e69caa932f6b592705f1016b011510c 2024-12-01T23:54:00,881 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin2/1e69caa932f6b592705f1016b011510c 2024-12-01T23:54:00,882 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin2/1e69caa932f6b592705f1016b011510c 2024-12-01T23:54:00,883 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1048): stopping wal replay for 1e69caa932f6b592705f1016b011510c 2024-12-01T23:54:00,883 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1060): Cleaning up temporary data for 1e69caa932f6b592705f1016b011510c 2024-12-01T23:54:00,885 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1093): writing seq id for 1e69caa932f6b592705f1016b011510c 2024-12-01T23:54:00,888 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin2/1e69caa932f6b592705f1016b011510c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:54:00,889 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1114): Opened 1e69caa932f6b592705f1016b011510c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72463457, jitterRate=0.0797896534204483}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T23:54:00,889 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1e69caa932f6b592705f1016b011510c 2024-12-01T23:54:00,890 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1006): Region open journal for 1e69caa932f6b592705f1016b011510c: Running coprocessor pre-open hook at 1733097240874Writing region info on filesystem at 
1733097240874Initializing all the Stores at 1733097240876 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733097240876Cleaning up temporary data from old regions at 1733097240883 (+7 ms)Running coprocessor post-open hooks at 1733097240889 (+6 ms)Region opened successfully at 1733097240890 (+1 ms) 2024-12-01T23:54:00,891 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2236): Post open deploy tasks for TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c., pid=15, masterSystemTime=1733097240864 2024-12-01T23:54:00,894 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2266): Finished post open deploy task for TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. 2024-12-01T23:54:00,894 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(153): Opened TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. 2024-12-01T23:54:00,895 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=14 updating hbase:meta row=1e69caa932f6b592705f1016b011510c, regionState=OPEN, openSeqNum=2, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:54:00,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=15, ppid=14, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1e69caa932f6b592705f1016b011510c, server=579b0f681375,45401,1733097235193 because future has completed 2024-12-01T23:54:00,902 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-12-01T23:54:00,902 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; OpenRegionProcedure 1e69caa932f6b592705f1016b011510c, server=579b0f681375,45401,1733097235193 in 188 msec 2024-12-01T23:54:00,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-01T23:54:00,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=1e69caa932f6b592705f1016b011510c, ASSIGN in 349 msec 2024-12-01T23:54:00,906 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T23:54:00,906 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097240906"}]},"ts":"1733097240906"} 2024-12-01T23:54:00,909 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=ENABLED in hbase:meta 2024-12-01T23:54:00,910 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=13, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestQuotaAdmin2 execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T23:54:00,912 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestQuotaAdmin2 in 394 msec 2024-12-01T23:54:01,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-01T23:54:01,294 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestQuotaAdmin2 completed 2024-12-01T23:54:01,294 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestQuotaAdmin2 get assigned. Timeout = 60000ms 2024-12-01T23:54:01,295 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:01,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestQuotaAdmin2 assigned to meta. Checking AM states. 2024-12-01T23:54:01,302 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:01,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestQuotaAdmin2 assigned. 2024-12-01T23:54:01,303 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:01,306 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin2,, stopping at row=TestQuotaAdmin2 ,, for max=2147483647 with caching=100 2024-12-01T23:54:01,321 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestQuotaAdmin2,, stopping at row=TestQuotaAdmin2 ,, for max=2147483647 with caching=100 2024-12-01T23:54:01,341 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] master.HMaster$18(3529): Client=jenkins//172.17.0.2 creating {NAME => 'TestNs'} 2024-12-01T23:54:01,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=16, state=RUNNABLE:CREATE_NAMESPACE_PREPARE, hasLock=false; CreateNamespaceProcedure, namespace=TestNs 2024-12-01T23:54:01,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=16 2024-12-01T23:54:01,368 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, state=SUCCESS, hasLock=false; CreateNamespaceProcedure, namespace=TestNs in 22 msec 2024-12-01T23:54:01,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=16 2024-12-01T23:54:01,621 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$NamespaceProcedureBiConsumer(2745): Operation: CREATE_NAMESPACE, Namespace: TestNs completed 2024-12-01T23:54:01,624 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestNs:TestTable', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T23:54:01,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=17, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; 
CreateTableProcedure table=TestNs:TestTable 2024-12-01T23:54:01,630 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T23:54:01,631 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:54:01,631 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "TestNs" qualifier: "TestTable" procId is: 17 2024-12-01T23:54:01,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-12-01T23:54:01,633 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T23:54:01,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741844_1020 (size=358) 2024-12-01T23:54:01,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741844_1020 (size=358) 2024-12-01T23:54:01,703 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => acce7aef1031d5e94b21fdd93f0aa04d, NAME => 'TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='TestNs:TestTable', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18 2024-12-01T23:54:01,704 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => b7dc35984ce3d77666ebbdeb398352af, NAME => 'TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='TestNs:TestTable', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18 2024-12-01T23:54:01,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741846_1022 (size=44) 2024-12-01T23:54:01,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741846_1022 (size=44) 2024-12-01T23:54:01,787 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(898): Instantiated 
TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:54:01,787 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1722): Closing b7dc35984ce3d77666ebbdeb398352af, disabling compactions & flushes 2024-12-01T23:54:01,788 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1755): Closing region TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. 2024-12-01T23:54:01,788 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. 2024-12-01T23:54:01,788 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. after waiting 0 ms 2024-12-01T23:54:01,788 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. 2024-12-01T23:54:01,788 INFO [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1973): Closed TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. 2024-12-01T23:54:01,788 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-1 {}] regionserver.HRegion(1676): Region close journal for b7dc35984ce3d77666ebbdeb398352af: Waiting for close lock at 1733097241787Disabling compacts and flushes for region at 1733097241787Disabling writes for close at 1733097241788 (+1 ms)Writing region close event to WAL at 1733097241788Closed at 1733097241788 2024-12-01T23:54:01,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741845_1021 (size=44) 2024-12-01T23:54:01,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741845_1021 (size=44) 2024-12-01T23:54:01,801 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(898): Instantiated TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:54:01,801 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1722): Closing acce7aef1031d5e94b21fdd93f0aa04d, disabling compactions & flushes 2024-12-01T23:54:01,801 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1755): Closing region TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. 2024-12-01T23:54:01,801 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. 2024-12-01T23:54:01,801 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. after waiting 0 ms 2024-12-01T23:54:01,802 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. 
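The TestNs namespace and the two-region TestNs:TestTable logged above (regions splitting at row key '1') correspond to a create-namespace call followed by a pre-split create-table call. A condensed sketch with the Admin API follows; the Admin handle is assumed to come from an open Connection, and only the names, the BLOOMFILTER => 'ROW' setting and the split key are taken from the log.

    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch only: namespace first, then a pre-split table inside it.
    public class CreateNamespacedTable {
      static void create(Admin admin) throws Exception {
        admin.createNamespace(NamespaceDescriptor.create("TestNs").build());

        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestNs", "TestTable"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)                       // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)       // BLOOMFILTER => 'ROW'
                .build())
            .build();

        // One split key ("1") yields the two regions seen above:
        // [ '' , '1' ) and [ '1' , '' ).
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
      }
    }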
2024-12-01T23:54:01,802 INFO [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1973): Closed TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. 2024-12-01T23:54:01,802 DEBUG [RegionOpenAndInit-TestNs:TestTable-pool-0 {}] regionserver.HRegion(1676): Region close journal for acce7aef1031d5e94b21fdd93f0aa04d: Waiting for close lock at 1733097241801Disabling compacts and flushes for region at 1733097241801Disabling writes for close at 1733097241801Writing region close event to WAL at 1733097241802 (+1 ms)Closed at 1733097241802 2024-12-01T23:54:01,806 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T23:54:01,807 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1733097241806"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733097241806"}]},"ts":"1733097241806"} 2024-12-01T23:54:01,807 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1733097241806"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733097241806"}]},"ts":"1733097241806"} 2024-12-01T23:54:01,857 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-01T23:54:01,860 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T23:54:01,860 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097241860"}]},"ts":"1733097241860"} 2024-12-01T23:54:01,869 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=ENABLING in hbase:meta 2024-12-01T23:54:01,869 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {579b0f681375=0} racks are {/default-rack=0} 2024-12-01T23:54:01,872 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T23:54:01,872 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T23:54:01,872 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T23:54:01,872 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T23:54:01,873 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T23:54:01,873 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T23:54:01,873 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T23:54:01,873 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=acce7aef1031d5e94b21fdd93f0aa04d, ASSIGN}, {pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure 
table=TestNs:TestTable, region=b7dc35984ce3d77666ebbdeb398352af, ASSIGN}] 2024-12-01T23:54:01,877 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=acce7aef1031d5e94b21fdd93f0aa04d, ASSIGN 2024-12-01T23:54:01,879 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=b7dc35984ce3d77666ebbdeb398352af, ASSIGN 2024-12-01T23:54:01,881 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=acce7aef1031d5e94b21fdd93f0aa04d, ASSIGN; state=OFFLINE, location=579b0f681375,45401,1733097235193; forceNewPlan=false, retain=false 2024-12-01T23:54:01,883 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=b7dc35984ce3d77666ebbdeb398352af, ASSIGN; state=OFFLINE, location=579b0f681375,44651,1733097235352; forceNewPlan=false, retain=false 2024-12-01T23:54:01,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-12-01T23:54:02,031 INFO [579b0f681375:40611 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-01T23:54:02,032 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=18 updating hbase:meta row=acce7aef1031d5e94b21fdd93f0aa04d, regionState=OPENING, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:54:02,033 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=b7dc35984ce3d77666ebbdeb398352af, regionState=OPENING, regionLocation=579b0f681375,44651,1733097235352 2024-12-01T23:54:02,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=b7dc35984ce3d77666ebbdeb398352af, ASSIGN because future has completed 2024-12-01T23:54:02,037 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure b7dc35984ce3d77666ebbdeb398352af, server=579b0f681375,44651,1733097235352}] 2024-12-01T23:54:02,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=18, ppid=17, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=acce7aef1031d5e94b21fdd93f0aa04d, ASSIGN because future has completed 2024-12-01T23:54:02,042 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=21, ppid=18, state=RUNNABLE, hasLock=false; OpenRegionProcedure acce7aef1031d5e94b21fdd93f0aa04d, server=579b0f681375,45401,1733097235193}] 2024-12-01T23:54:02,204 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(132): Open 
TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. 2024-12-01T23:54:02,205 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7752): Opening region: {ENCODED => acce7aef1031d5e94b21fdd93f0aa04d, NAME => 'TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d.', STARTKEY => '', ENDKEY => '1'} 2024-12-01T23:54:02,205 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestTable acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:54:02,205 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(898): Instantiated TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:54:02,206 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7794): checking encryption for acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:54:02,206 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7797): checking classloading for acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:54:02,215 INFO [StoreOpener-acce7aef1031d5e94b21fdd93f0aa04d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:54:02,220 INFO [StoreOpener-acce7aef1031d5e94b21fdd93f0aa04d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region acce7aef1031d5e94b21fdd93f0aa04d columnFamilyName cf 2024-12-01T23:54:02,220 DEBUG [StoreOpener-acce7aef1031d5e94b21fdd93f0aa04d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:54:02,221 INFO [StoreOpener-acce7aef1031d5e94b21fdd93f0aa04d-1 {}] regionserver.HStore(327): Store=acce7aef1031d5e94b21fdd93f0aa04d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:54:02,221 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1038): replaying wal for acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:54:02,223 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:54:02,224 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:54:02,226 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1048): stopping wal replay for acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:54:02,226 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1060): Cleaning up temporary data for acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:54:02,227 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. 2024-12-01T23:54:02,227 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => b7dc35984ce3d77666ebbdeb398352af, NAME => 'TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af.', STARTKEY => '1', ENDKEY => ''} 2024-12-01T23:54:02,228 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestTable b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:54:02,228 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T23:54:02,228 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:54:02,228 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:54:02,235 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1093): writing seq id for acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:54:02,243 INFO [StoreOpener-b7dc35984ce3d77666ebbdeb398352af-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:54:02,250 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/acce7aef1031d5e94b21fdd93f0aa04d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:54:02,253 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1114): Opened acce7aef1031d5e94b21fdd93f0aa04d; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64802535, jitterRate=-0.03436698019504547}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T23:54:02,254 INFO [StoreOpener-b7dc35984ce3d77666ebbdeb398352af-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:10, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b7dc35984ce3d77666ebbdeb398352af columnFamilyName cf 2024-12-01T23:54:02,253 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1122): Running coprocessor post-open hooks for acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:54:02,254 DEBUG [StoreOpener-b7dc35984ce3d77666ebbdeb398352af-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T23:54:02,255 INFO [StoreOpener-b7dc35984ce3d77666ebbdeb398352af-1 {}] regionserver.HStore(327): Store=b7dc35984ce3d77666ebbdeb398352af/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T23:54:02,255 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:54:02,256 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1006): Region open journal for acce7aef1031d5e94b21fdd93f0aa04d: Running coprocessor pre-open hook at 1733097242206Writing region info on filesystem at 1733097242206Initializing all the Stores at 1733097242209 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733097242210 (+1 ms)Cleaning up temporary data from old regions at 1733097242226 (+16 ms)Running coprocessor post-open hooks at 1733097242254 (+28 ms)Region opened successfully at 1733097242255 (+1 ms) 2024-12-01T23:54:02,257 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:54:02,258 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:54:02,258 INFO 
[RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2236): Post open deploy tasks for TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d., pid=21, masterSystemTime=1733097242198 2024-12-01T23:54:02,259 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:54:02,259 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:54:02,264 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:54:02,265 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=18 updating hbase:meta row=acce7aef1031d5e94b21fdd93f0aa04d, regionState=OPEN, openSeqNum=2, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:54:02,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=18, state=RUNNABLE, hasLock=false; OpenRegionProcedure acce7aef1031d5e94b21fdd93f0aa04d, server=579b0f681375,45401,1733097235193 because future has completed 2024-12-01T23:54:02,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=18 2024-12-01T23:54:02,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=18, state=SUCCESS, hasLock=false; OpenRegionProcedure acce7aef1031d5e94b21fdd93f0aa04d, server=579b0f681375,45401,1733097235193 in 232 msec 2024-12-01T23:54:02,283 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2266): Finished post open deploy task for TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. 2024-12-01T23:54:02,284 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(153): Opened TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. 
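The OpenRegionProcedure records above (pid=20 and pid=21) open the two regions of TestNs:TestTable, one spanning STARTKEY '' to ENDKEY '1' and the other from '1' to the end of the keyspace, which is what a table created with a single split point looks like. Below is a minimal sketch, assuming a standard HBase client setup, of how such a table could be created; the namespace, the column family name 'cf' and the split key '1' come from the log records, while the class name, connection handling and everything else are illustrative only.

```java
// Sketch only (not taken from the test code): create TestNs:TestTable with one
// split point at "1", yielding the two regions opened by pid=20/pid=21 above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Create the namespace first (this call fails if TestNs already exists).
      admin.createNamespace(NamespaceDescriptor.create("TestNs").build());

      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestNs", "TestTable"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();

      // One split key produces two regions: ('' -> '1') and ('1' -> '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```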
2024-12-01T23:54:02,284 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/b7dc35984ce3d77666ebbdeb398352af/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T23:54:02,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, ppid=17, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=acce7aef1031d5e94b21fdd93f0aa04d, ASSIGN in 407 msec 2024-12-01T23:54:02,285 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened b7dc35984ce3d77666ebbdeb398352af; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72822003, jitterRate=0.0851324051618576}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T23:54:02,285 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:54:02,285 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for b7dc35984ce3d77666ebbdeb398352af: Running coprocessor pre-open hook at 1733097242228Writing region info on filesystem at 1733097242228Initializing all the Stores at 1733097242230 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733097242230Cleaning up temporary data from old regions at 1733097242259 (+29 ms)Running coprocessor post-open hooks at 1733097242285 (+26 ms)Region opened successfully at 1733097242285 2024-12-01T23:54:02,287 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., pid=20, masterSystemTime=1733097242194 2024-12-01T23:54:02,295 DEBUG [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. 2024-12-01T23:54:02,295 INFO [RS_OPEN_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. 
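Both region-open records report the split policy in effect: SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy, with a jittered desiredMaxFileSize derived from ConstantSizeRegionSplitPolicy. That policy is the default in recent HBase versions; the sketch below shows how a split policy and max file size could be pinned explicitly on a table descriptor. SteppingSplitPolicy is a real HBase class, but the 256 MB value is an arbitrary illustrative number, and nothing in this log indicates the test configures any of this explicitly.

```java
// Sketch only: pinning a split policy and max region file size per table.
// Without this, the cluster-wide hbase.regionserver.region.split.policy applies.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy;

public class SplitPolicySketch {
  static TableDescriptor withExplicitSplitPolicy() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestNs", "TestTable"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        // The policy class logged in the open records above.
        .setRegionSplitPolicyClassName(SteppingSplitPolicy.class.getName())
        // Upper bound a region store may reach before becoming a split candidate;
        // 256 MB here is illustrative, not the jittered value from the log.
        .setMaxFileSize(256L * 1024 * 1024)
        .build();
  }
}
```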
2024-12-01T23:54:02,295 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=b7dc35984ce3d77666ebbdeb398352af, regionState=OPEN, openSeqNum=2, regionLocation=579b0f681375,44651,1733097235352 2024-12-01T23:54:02,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure b7dc35984ce3d77666ebbdeb398352af, server=579b0f681375,44651,1733097235352 because future has completed 2024-12-01T23:54:02,330 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-01T23:54:02,330 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure b7dc35984ce3d77666ebbdeb398352af, server=579b0f681375,44651,1733097235352 in 278 msec 2024-12-01T23:54:02,336 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=17 2024-12-01T23:54:02,337 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=17, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=b7dc35984ce3d77666ebbdeb398352af, ASSIGN in 457 msec 2024-12-01T23:54:02,338 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T23:54:02,338 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097242338"}]},"ts":"1733097242338"} 2024-12-01T23:54:02,342 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=ENABLED in hbase:meta 2024-12-01T23:54:02,344 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=17, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestNs:TestTable execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T23:54:02,348 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestNs:TestTable in 720 msec 2024-12-01T23:54:02,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=17 2024-12-01T23:54:02,403 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: TestNs:TestTable completed 2024-12-01T23:54:02,403 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestNs:TestTable get assigned. Timeout = 60000ms 2024-12-01T23:54:02,403 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:02,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestNs:TestTable assigned to meta. Checking AM states. 2024-12-01T23:54:02,409 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:02,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestNs:TestTable assigned. 
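With TestNs:TestTable created and all regions assigned, the log moves into testUserTableClusterScopeQuota, where reads issued by user jenkins against TestNs:TestTable start failing with RpcThrottlingException ("number of read requests exceeded"). Below is a hedged sketch of how a per-user, per-table READ_NUMBER throttle of that kind is typically installed and how a caller might react. The limit of 10 reads per minute and the row key are made up for illustration, and the test presumably uses a cluster-scoped variant of the quota (per its name) that is not shown here; only "jenkins" and TestNs:TestTable come from the log.

```java
// Sketch only: install a per-user, per-table read-number throttle and react to it.
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.RpcThrottlingException;
import org.apache.hadoop.hbase.quotas.ThrottleType;
import org.apache.hadoop.hbase.util.Bytes;

public class UserTableThrottleSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestNs", "TestTable");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Limit the number of read requests user "jenkins" may issue against this
      // table: 10 per minute (illustrative value, not the test's setting).
      admin.setQuota(QuotaSettingsFactory.throttleUser(
          "jenkins", tn, ThrottleType.READ_NUMBER, 10, TimeUnit.MINUTES));

      try (Table table = conn.getTable(tn)) {
        table.get(new Get(Bytes.toBytes("row-0")));
      } catch (RpcThrottlingException e) {
        // Direct throttling signal; depending on retry settings it may instead
        // surface wrapped in a RetriesExhaustedException, as in the ERROR records below.
        System.out.println("Throttled; server suggests waiting " + e.getWaitInterval() + " ms");
      }
    }
  }
}
```

The wait interval the server attaches to the exception is what shows up in the records that follow as "wait 6mins, 0ms"; a client handling the exception directly would read it from getWaitInterval() before retrying.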
2024-12-01T23:54:02,410 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:02,412 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestNs:TestTable,, stopping at row=TestNs:TestTable ,, for max=2147483647 with caching=100 2024-12-01T23:54:02,418 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestNs:TestTable,, stopping at row=TestNs:TestTable ,, for max=2147483647 with caching=100 2024-12-01T23:54:02,445 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserTableClusterScopeQuota Thread=300, OpenFileDescriptor=531, MaxFileDescriptor=1048576, SystemLoadAverage=501, ProcessCount=11, AvailableMemoryMB=7174 2024-12-01T23:54:02,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='u.jenkins', locateType=CURRENT is [region=hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa., hostname=579b0f681375,44651,1733097235352, seqNum=2] 2024-12-01T23:54:02,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T23:54:02,467 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34107, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T23:54:02,493 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-01T23:54:02,493 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] client.AsyncConnectionImpl(321): The fetched master address is 579b0f681375,40611,1733097234530 2024-12-01T23:54:02,493 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@74472b65 2024-12-01T23:54:02,499 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T23:54:02,501 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44405, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=MasterService 2024-12-01T23:54:02,527 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T23:54:02,527 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=579b0f681375,45401,1733097235193, seqNum=-1] 2024-12-01T23:54:02,527 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T23:54:02,530 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56629, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-01T23:54:02,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.default', locateType=CURRENT is [region=hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa., 
hostname=579b0f681375,44651,1733097235352, seqNum=2] 2024-12-01T23:54:02,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T23:54:02,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.TestNs', locateType=CURRENT is [region=hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa., hostname=579b0f681375,44651,1733097235352, seqNum=2] 2024-12-01T23:54:02,538 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38757, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-01T23:54:02,747 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:02,748 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:54:02,748 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733100842490 bypass), TestNs=QuotaState(ts=1733100842490 bypass)} 2024-12-01T23:54:02,748 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733100842490 bypass), TestNs:TestTable=QuotaState(ts=1733100842490 bypass), TestQuotaAdmin2=QuotaState(ts=1733100842490 bypass), TestQuotaAdmin1=QuotaState(ts=1733100842490 bypass)} 2024-12-01T23:54:02,748 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733100842490 [ TestNs:TestTable ])} 2024-12-01T23:54:02,748 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733100842490 bypass)} 2024-12-01T23:54:02,749 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-01T23:54:02,749 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] client.AsyncConnectionImpl(321): The fetched master address is 579b0f681375,40611,1733097234530 2024-12-01T23:54:02,749 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@642ba746 2024-12-01T23:54:02,749 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T23:54:02,751 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52637, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=MasterService 2024-12-01T23:54:02,754 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T23:54:02,755 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=579b0f681375,45401,1733097235193, seqNum=-1] 2024-12-01T23:54:02,755 DEBUG [regionserver/579b0f681375:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T23:54:02,757 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53281, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-01T23:54:02,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:quota', row='n.TestNs', locateType=CURRENT is [region=hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa., hostname=579b0f681375,44651,1733097235352, seqNum=2] 2024-12-01T23:54:02,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T23:54:02,771 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33179, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-01T23:54:02,815 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-01T23:54:02,884 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin0' 2024-12-01T23:54:02,887 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-01T23:54:02,888 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestNs:TestTable' 2024-12-01T23:54:02,889 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin2' 2024-12-01T23:54:02,890 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestQuotaAdmin1' 2024-12-01T23:54:02,891 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:quota' 2024-12-01T23:54:02,999 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:02,999 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:54:02,999 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {TestNs=QuotaState(ts=1733100842490 bypass)} 2024-12-01T23:54:03,000 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733100842490 bypass)} 2024-12-01T23:54:03,000 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733100842490 [ TestNs:TestTable ])} 2024-12-01T23:54:03,000 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733100842490 bypass)} 2024-12-01T23:54:03,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44651 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-12-01T23:54:03,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44651 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Get size: 118 connection: 172.17.0.2:50794 deadline: 1733097253057, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-12-01T23:54:03,071 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1 , the old value is region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1, 
error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:54:03,072 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:54:03,072 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1 because the exception is null or not the one we care about 2024-12-01T23:54:03,073 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would 
exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:54:03,075 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=10 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-01T23:54:03.073Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:224) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:54:03,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44651 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-12-01T23:54:03,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44651 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Get size: 117 connection: 172.17.0.2:50794 deadline: 1733097253077, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-12-01T23:54:03,082 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1 , the old value is region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:54:03,083 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:54:03,083 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1 because the exception is null or not the one we care about 2024-12-01T23:54:03,083 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133)
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99)
    at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?]
    at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?]
    at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?]
    at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?]
    at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9]
    at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133)
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99)
    at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 32 more
2024-12-01T23:54:03,085 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=0
org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions:
2024-12-01T23:54:03.083Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133)
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99)
    at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
    at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?]
    at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?]
    at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:224) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
    at --------Future.get--------(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?]
    at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?]
    at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?]
    at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?]
    at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9]
    at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133)
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99)
    at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 42 more
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133)
    at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99)
    at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121)
    at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243)
    at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 32 more
2024-12-01T23:54:03,353 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-01T23:54:03,353 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-12-01T23:54:03,353 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733104442490 bypass), TestNs=QuotaState(ts=1733104442490 bypass)}
2024-12-01T23:54:03,353 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733104442490 bypass), TestNs:TestTable=QuotaState(ts=1733104442490 bypass), TestQuotaAdmin2=QuotaState(ts=1733104442490 bypass), TestQuotaAdmin1=QuotaState(ts=1733104442490 bypass)}
2024-12-01T23:54:03,353 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733104442490 bypass)}
2024-12-01T23:54:03,353 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733104442490 bypass)}
2024-12-01T23:54:03,604 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-01T23:54:03,604 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache
2024-12-01T23:54:03,604 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {TestNs=QuotaState(ts=1733104442490 bypass)}
2024-12-01T23:54:03,604 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733104442490 bypass)}
2024-12-01T23:54:03,604 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733104442490 bypass)}
2024-12-01T23:54:03,604 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733104442490 bypass)}
2024-12-01T23:54:03,621 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserTableClusterScopeQuota Thread=301 (was 300) Potentially hanging thread: Timer for 'HBase' metrics
system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) - Thread LEAK? -, OpenFileDescriptor=545 (was 531) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=501 (was 501), ProcessCount=11 (was 11), AvailableMemoryMB=7150 (was 7174) 2024-12-01T23:54:03,632 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserNamespaceClusterScopeQuota Thread=301, OpenFileDescriptor=545, MaxFileDescriptor=1048576, SystemLoadAverage=501, ProcessCount=11, AvailableMemoryMB=7150 2024-12-01T23:54:03,912 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:03,912 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:04,163 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:54:04,163 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733108042590 bypass), TestNs=QuotaState(ts=1733108042590 bypass)} 2024-12-01T23:54:04,163 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733108042590 bypass), TestNs:TestTable=QuotaState(ts=1733108042590 bypass), TestQuotaAdmin2=QuotaState(ts=1733108042590 bypass), TestQuotaAdmin1=QuotaState(ts=1733108042590 bypass)} 2024-12-01T23:54:04,163 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733108042590 [ default ])} 2024-12-01T23:54:04,163 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733108042590 bypass)} 2024-12-01T23:54:04,414 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:54:04,414 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:04,665 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:04,850 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-01T23:54:04,851 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-01T23:54:04,853 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin0 2024-12-01T23:54:04,853 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin0 Metrics about Tables on a single HBase RegionServer 2024-12-01T23:54:04,854 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin2 2024-12-01T23:54:04,856 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin2 Metrics about Tables on a single HBase RegionServer 2024-12-01T23:54:04,857 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_quota 2024-12-01T23:54:04,857 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_quota Metrics about Tables on a single HBase RegionServer 2024-12-01T23:54:04,858 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-01T23:54:04,858 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-01T23:54:04,859 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_TestNs_table_TestTable 2024-12-01T23:54:04,859 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_TestNs_table_TestTable Metrics about Tables on a single HBase RegionServer 2024-12-01T23:54:04,859 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.quotas.MasterQuotasObserver 2024-12-01T23:54:04,859 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.quotas.MasterQuotasObserver Metrics about HBase MasterObservers 2024-12-01T23:54:04,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T23:54:04,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-01T23:54:04,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin1 2024-12-01T23:54:04,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestQuotaAdmin1 Metrics about Tables on a single HBase RegionServer 2024-12-01T23:54:04,915 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:05,166 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:05,416 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:05,667 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not 
refreshed, bypass expected false, actual true 2024-12-01T23:54:05,918 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:06,168 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:06,419 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:06,669 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:06,920 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:07,170 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:07,421 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:07,671 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:07,708 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-01T23:54:07,922 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:08,172 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:08,423 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:08,673 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:08,924 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:09,174 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:09,425 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:09,675 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:09,926 INFO 
[Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:10,176 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:10,427 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:10,677 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:10,928 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:11,178 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:11,429 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:11,679 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:11,930 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:12,180 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:12,431 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:12,681 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:12,932 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:13,182 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:13,433 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:13,683 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:13,934 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:14,184 INFO [Time-limited test {}] 
quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:14,435 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:14,685 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:14,935 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:15,186 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:15,436 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:15,687 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:15,938 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:16,188 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:16,438 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:16,689 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:16,939 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:17,190 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:17,440 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:17,691 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:17,941 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:18,192 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:18,442 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter 
for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:18,693 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:18,943 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:19,194 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:19,444 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:19,694 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:19,945 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:20,195 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:20,446 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:20,696 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:20,947 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:21,197 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:21,448 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:21,699 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:21,949 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:22,200 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:22,451 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:22,701 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), 
table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:22,952 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:23,203 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:23,454 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:23,664 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-01T23:54:23,705 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:23,955 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:24,206 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:24,457 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:24,707 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:24,958 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:25,209 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:25,460 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:25,710 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:25,961 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:26,212 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:26,463 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:26,713 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), 
table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:26,964 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:27,215 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:27,466 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:27,716 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:27,967 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:28,217 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:28,468 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:28,719 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:28,969 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:29,220 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:29,471 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:29,721 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:29,971 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:30,222 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:30,472 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:30,723 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:30,973 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected 
false, actual true 2024-12-01T23:54:31,224 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:31,474 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:31,725 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:31,975 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:32,226 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:32,477 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:32,727 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:32,978 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:33,228 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:33,479 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:33,729 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:33,980 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:34,230 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:34,481 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:34,731 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:34,981 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:35,232 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:35,482 INFO 
[Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:35,733 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:35,983 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:36,234 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:36,484 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:36,735 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:36,985 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:37,236 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:37,486 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:37,708 INFO [master/579b0f681375:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-01T23:54:37,708 INFO [master/579b0f681375:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
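For context on the entries above: the RpcThrottlingException ("number of read requests exceeded - wait 6mins, 0ms") is raised by the RegionServer once a read-number quota is exhausted, and the repeated "User limiter ... not refreshed" lines show ThrottleQuotaTestUtil polling until each RegionServer's QuotaCache observes the updated user quota. The following sketch is only an illustration, not code from this test: the user, table, and limit values are placeholders borrowed from the log, and it uses the public Admin quota API to set and remove a per-user read throttle of the kind being exercised here.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class ReadThrottleSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder names taken from the log for illustration; adjust for a real cluster.
    String user = "jenkins";
    TableName table = TableName.valueOf("TestQuotaAdmin0");
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Allow this user at most 6 read requests per minute on the table (illustrative limit).
      // Once the budget is spent, further gets fail with RpcThrottlingException
      // ("number of read requests exceeded"), as recorded in the trace above.
      admin.setQuota(QuotaSettingsFactory.throttleUser(
          user, table, ThrottleType.READ_NUMBER, 6, TimeUnit.MINUTES));

      // ... issue gets against the table here ...

      // Drop the throttle again. RegionServers only observe the change after their
      // QuotaCache refreshes, which is what the polling entries above are waiting for.
      admin.setQuota(QuotaSettingsFactory.unthrottleUser(user, table));
    }
  }
}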
2024-12-01T23:54:37,736 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:37,987 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:38,237 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:38,488 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:38,738 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:38,989 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:39,239 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:39,489 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:39,740 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:39,990 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:40,241 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:40,491 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:40,742 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:40,992 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:41,243 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:41,493 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:41,744 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:41,994 INFO [Time-limited test {}] 
quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:42,245 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:42,495 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:42,746 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:42,996 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:43,247 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:43,497 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:43,748 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:43,998 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:44,249 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:44,499 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:44,749 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:45,000 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:45,250 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:45,501 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:45,751 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:46,002 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:46,252 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter 
for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:46,503 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:46,753 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:47,004 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:47,254 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:47,505 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:47,755 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:48,006 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:48,256 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:48,507 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:48,757 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:49,008 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:49,258 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:49,509 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:49,760 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:50,011 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:50,261 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:50,512 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), 
table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:50,763 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:51,013 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:51,264 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:51,515 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:51,766 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:52,016 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:52,267 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:52,518 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:52,769 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:53,019 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:53,270 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:53,521 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:53,665 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
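The DEBUG entry just above from hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer reports a NoSuchFieldException for "threadGroup": the fixer reaches into a Hadoop-internal object by reflection, and on Hadoop releases newer than 3.2.3 / 3.3.4 the field it expects is no longer there (see HBASE-27595), so it only logs the miss and carries on. Below is a minimal, hypothetical sketch of that pattern; the class name and the field lookup are illustrative stand-ins, not the actual HBaseTestingUtil code.

import java.lang.reflect.Field;

public class ThreadGroupFieldProbe {
  public static void main(String[] args) {
    try {
      // "threadGroup" mirrors the field name from the log entry; it is expected to be
      // missing on current runtimes, so the catch branch is the normal path here.
      Field f = Thread.class.getDeclaredField("threadGroup");
      f.setAccessible(true);
      System.out.println("found field: " + f);
    } catch (NoSuchFieldException e) {
      // Mirrors the fixer's behaviour seen in the log: note the missing field and
      // keep going instead of failing the test setup.
      System.out.println("NoSuchFieldException: " + e.getMessage()
          + "; field not present on this runtime, continuing without the workaround");
    }
  }
}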
2024-12-01T23:54:53,772 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:54,022 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:54,273 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:54,524 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:54,775 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:55,025 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:55,276 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:55,526 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:55,777 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:56,028 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:56,278 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:56,529 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:56,779 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:57,030 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:57,280 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:57,530 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:57,781 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:58,031 INFO [Time-limited test {}] 
quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:58,282 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:58,290 DEBUG [master/579b0f681375:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b7dc35984ce3d77666ebbdeb398352af changed from -1.0 to 0.0, refreshing cache 2024-12-01T23:54:58,291 DEBUG [master/579b0f681375:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 67f67293b31d223687049a16b9d906fa changed from -1.0 to 0.0, refreshing cache 2024-12-01T23:54:58,292 DEBUG [master/579b0f681375:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region acce7aef1031d5e94b21fdd93f0aa04d changed from -1.0 to 0.0, refreshing cache 2024-12-01T23:54:58,294 DEBUG [master/579b0f681375:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 3db8e2be75873790be5eaad9cf5be7a6 changed from -1.0 to 0.0, refreshing cache 2024-12-01T23:54:58,295 DEBUG [master/579b0f681375:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 6bde2c94473bacf7f0b53f286324955b changed from -1.0 to 0.0, refreshing cache 2024-12-01T23:54:58,295 DEBUG [master/579b0f681375:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1e69caa932f6b592705f1016b011510c changed from -1.0 to 0.0, refreshing cache 2024-12-01T23:54:58,532 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:58,783 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:59,033 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:59,284 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:59,534 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:54:59,785 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:00,035 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:00,286 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:00,536 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:00,786 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter 
for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:01,037 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:01,287 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:01,538 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:01,788 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:02,039 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:02,289 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:02,540 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:02,790 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:03,041 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:03,291 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:03,542 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:03,792 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:04,043 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:04,293 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:04,414 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:04,427 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserNamespaceClusterScopeQuota Thread=282 (was 301), OpenFileDescriptor=531 (was 545), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=420 (was 501), ProcessCount=11 (was 11), AvailableMemoryMB=6717 (was 7150) 
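The long run of "User limiter ... not refreshed, bypass expected false, actual true" INFO lines above comes from the test polling the region server's quota state roughly every 250 ms, within the 60,000 ms budget announced by hbase.Waiter, until the user limiter reflects the newly configured quota. The sketch below is a hypothetical reconstruction of that loop shape only; RefreshWaiter and its parameters are invented for illustration and are not the actual ThrottleQuotaTestUtil implementation.

import java.util.function.BooleanSupplier;

public final class RefreshWaiter {
  // Poll a condition until it holds or the time budget runs out, logging while it is false.
  public static boolean waitForRefresh(BooleanSupplier refreshed, long timeoutMs, long intervalMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (refreshed.getAsBoolean()) {
        return true;                      // limiter picked up the new quota settings
      }
      System.out.println("limiter not refreshed yet, retrying in " + intervalMs + " ms");
      Thread.sleep(intervalMs);           // matches the ~250 ms cadence seen in the log
    }
    return false;                         // timed out; the caller decides how to fail
  }

  public static void main(String[] args) throws InterruptedException {
    // Toy condition that becomes true after ~1 s, standing in for the quota cache check.
    long start = System.currentTimeMillis();
    boolean ok = waitForRefresh(() -> System.currentTimeMillis() - start > 1_000, 60_000, 250);
    System.out.println("refreshed=" + ok);
  }
}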
2024-12-01T23:55:04,435 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testUserClusterScopeQuota Thread=283, OpenFileDescriptor=531, MaxFileDescriptor=1048576, SystemLoadAverage=420, ProcessCount=11, AvailableMemoryMB=6717 2024-12-01T23:55:04,696 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:55:04,696 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:04,946 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:55:04,946 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733111666790 bypass), TestNs=QuotaState(ts=1733111666790 bypass)} 2024-12-01T23:55:04,947 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733111666790 bypass), TestNs:TestTable=QuotaState(ts=1733111666790 bypass), TestQuotaAdmin2=QuotaState(ts=1733111666790 bypass), TestQuotaAdmin1=QuotaState(ts=1733111666790 bypass)} 2024-12-01T23:55:04,947 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733111666790 global-limiter [ default ])} 2024-12-01T23:55:04,947 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733111666790 bypass)} 2024-12-01T23:55:05,197 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:55:05,197 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(217): User limiter for user=jenkins (auth:SIMPLE), table=TestQuotaAdmin0 not refreshed, bypass expected false, actual true 2024-12-01T23:55:05,448 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:55:05,448 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {TestNs=QuotaState(ts=1733111666890 bypass)} 2024-12-01T23:55:05,448 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733111666890 bypass)} 2024-12-01T23:55:05,448 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733111666890 global-limiter)} 2024-12-01T23:55:05,448 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733111666890 bypass)} 2024-12-01T23:55:05,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 10sec, 0ms 2024-12-01T23:55:05,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:45098 deadline: 1733097315464, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms 2024-12-01T23:55:05,466 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:55:05,466 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:55:05,466 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1 because the exception is null or not the one we care about 2024-12-01T23:55:05,466 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:55:05,468 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-01T23:55:05.466Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserClusterScopeQuota(TestClusterScopeQuotaThrottle.java:178) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:55:05,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 12sec, 0ms 2024-12-01T23:55:05,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:45098 deadline: 1733097315475, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms 2024-12-01T23:55:05,477 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:55:05,477 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:55:05,478 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1 because the exception is null or not the one we care about 2024-12-01T23:55:05,478 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 12000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:55:05,479 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=5 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-01T23:55:05.478Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testUserClusterScopeQuota(TestClusterScopeQuotaThrottle.java:179) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:55:05,489 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testUserClusterScopeQuota Thread=283 (was 283), OpenFileDescriptor=531 (was 531), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=420 (was 420), ProcessCount=11 (was 11), AvailableMemoryMB=6714 (was 6717) 2024-12-01T23:55:05,497 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testTableClusterScopeQuota Thread=283, OpenFileDescriptor=531, MaxFileDescriptor=1048576, SystemLoadAverage=420, ProcessCount=11, AvailableMemoryMB=6713 2024-12-01T23:55:05,752 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:55:05,753 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:55:05,753 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733115266890 bypass), TestNs=QuotaState(ts=1733115266890 bypass)} 2024-12-01T23:55:05,755 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733115266890 bypass), TestNs:TestTable=QuotaState(ts=1733115266890 TimeBasedLimiter( readReqs=AverageIntervalRateLimiter(avail=10 limit=10 tunit=3600000))), TestQuotaAdmin2=QuotaState(ts=1733115266890 bypass), TestQuotaAdmin1=QuotaState(ts=1733115266890 bypass)} 2024-12-01T23:55:05,755 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-12-01T23:55:05,755 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733115266890 bypass)} 2024-12-01T23:55:06,005 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:55:06,005 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:55:06,006 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {TestNs=QuotaState(ts=1733115266890 
bypass)} 2024-12-01T23:55:06,006 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733115266890 TimeBasedLimiter( readReqs=AverageIntervalRateLimiter(avail=10 limit=10 tunit=3600000)))} 2024-12-01T23:55:06,006 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-12-01T23:55:06,006 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733115266890 bypass)} 2024-12-01T23:55:06,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44651 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-12-01T23:55:06,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44651 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Get size: 118 connection: 172.17.0.2:50794 deadline: 1733097316016, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-12-01T23:55:06,018 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1 , the old value is region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:55:06,018 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at 
org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:55:06,018 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1 because the exception is null or not the one we care about 2024-12-01T23:55:06,018 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:55:06,019 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=10 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-01T23:55:06.018Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:151) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:55:06,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44651 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestNs:TestTable numWrites=0 numReads=1: number of read requests exceeded - wait 6mins, 0ms 2024-12-01T23:55:06,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44651 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Get size: 117 connection: 172.17.0.2:50794 deadline: 1733097316020, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms 2024-12-01T23:55:06,021 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1 , the old value is region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:55:06,022 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:55:06,022 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af., hostname=579b0f681375,44651,1733097235352, seqNum=-1 because the exception is null or not the one we care about 2024-12-01T23:55:06,022 DEBUG [RPCClient-NioEventLoopGroup-5-5 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 360000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:55:06,022 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=0 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-01T23:55:06.022Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testTableClusterScopeQuota(TestClusterScopeQuotaThrottle.java:151) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 6mins, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:55:06,279 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:55:06,279 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:55:06,279 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733118866890 bypass), TestNs=QuotaState(ts=1733118866890 bypass)} 2024-12-01T23:55:06,279 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733118866890 bypass), TestNs:TestTable=QuotaState(ts=1733118866890 bypass), TestQuotaAdmin2=QuotaState(ts=1733118866890 bypass), TestQuotaAdmin1=QuotaState(ts=1733118866890 bypass)} 2024-12-01T23:55:06,279 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-12-01T23:55:06,279 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733118866890 bypass)} 2024-12-01T23:55:06,530 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:55:06,530 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:55:06,530 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {TestNs=QuotaState(ts=1733118866890 bypass)} 2024-12-01T23:55:06,530 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733118866890 bypass)} 2024-12-01T23:55:06,530 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733118866890 global-limiter)} 2024-12-01T23:55:06,530 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733118866890 bypass)} 2024-12-01T23:55:06,540 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testTableClusterScopeQuota Thread=283 (was 283), OpenFileDescriptor=531 (was 531), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=420 
(was 420), ProcessCount=11 (was 11), AvailableMemoryMB=6721 (was 6713) - AvailableMemoryMB LEAK? - 2024-12-01T23:55:06,549 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: quotas.TestClusterScopeQuotaThrottle#testNamespaceClusterScopeQuota Thread=283, OpenFileDescriptor=531, MaxFileDescriptor=1048576, SystemLoadAverage=420, ProcessCount=11, AvailableMemoryMB=6720 2024-12-01T23:55:06,808 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:55:06,809 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:55:06,809 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733122466890 TimeBasedLimiter( writeReqs=AverageIntervalRateLimiter(avail=5 limit=5 tunit=60000) readReqs=AverageIntervalRateLimiter(avail=6 limit=6 tunit=60000))), TestNs=QuotaState(ts=1733122466890 bypass)} 2024-12-01T23:55:06,809 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733122466890 bypass), TestNs:TestTable=QuotaState(ts=1733122466890 bypass), TestQuotaAdmin2=QuotaState(ts=1733122466890 bypass), TestQuotaAdmin1=QuotaState(ts=1733122466890 bypass)} 2024-12-01T23:55:06,809 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-12-01T23:55:06,809 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733122466890 bypass)} 2024-12-01T23:55:07,060 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:55:07,060 INFO [Time-limited test {}] quotas.ThrottleQuotaTestUtil$1(239): Namespace limiter for namespace=default not refreshed, bypass expected false, actual true 2024-12-01T23:55:07,310 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:55:07,310 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733122466990 TimeBasedLimiter( writeReqs=AverageIntervalRateLimiter(avail=5 limit=5 tunit=60000) readReqs=AverageIntervalRateLimiter(avail=6 limit=6 tunit=60000))), TestNs=QuotaState(ts=1733122466990 bypass)} 2024-12-01T23:55:07,310 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733122466990 bypass)} 2024-12-01T23:55:07,310 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-12-01T23:55:07,310 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733122466990 bypass)} 2024-12-01T23:55:07,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=1 numReads=0: number of write requests exceeded - wait 12sec, 0ms 2024-12-01T23:55:07,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 143 connection: 172.17.0.2:45098 deadline: 1733097317323, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms 2024-12-01T23:55:07,325 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1, 
error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:55:07,325 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:55:07,325 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1 because the exception is null or not the one we care about 2024-12-01T23:55:07,325 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 12000000000ns which 
would exceed the timeout. We should throw instead. org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:55:07,326 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(86): put failed after nRetries=5 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-01T23:55:07.325Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.put(TableOverAsyncTable.java:213) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:81) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doPuts(ThrottleQuotaTestUtil.java:64) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:128) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of write requests exceeded - wait 12sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumWriteRequestsExceeded(RpcThrottlingException.java:104) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:163) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:191) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2952) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:55:07,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] quotas.RegionServerRpcQuotaManager(245): Throttling exception for user=jenkins table=TestQuotaAdmin0 numWrites=0 numReads=1: number of read requests exceeded - wait 10sec, 0ms 2024-12-01T23:55:07,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45401 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Get size: 115 connection: 172.17.0.2:45098 deadline: 1733097317336, exception=org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms 2024-12-01T23:55:07,338 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1 , the old value is region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1, error=org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:55:07,339 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1 is org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-01T23:55:07,339 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., hostname=579b0f681375,45401,1733097235193, seqNum=-1 because the exception is null or not the one we care about 2024-12-01T23:55:07,339 DEBUG [RPCClient-NioEventLoopGroup-5-3 {}] backoff.HBaseServerExceptionPauseManager(61): RpcThrottlingException suggested pause of 10000000000ns which would exceed the timeout. We should throw instead. 
org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:55:07,340 ERROR [Time-limited test {}] quotas.ThrottleQuotaTestUtil(110): get failed after nRetries=6 org.apache.hadoop.hbase.client.RetriesExhaustedException: Failed after attempts=1, exceptions: 2024-12-01T23:55:07.339Z, org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.TableOverAsyncTable.get(TableOverAsyncTable.java:188) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.quotas.ThrottleQuotaTestUtil.doGets(ThrottleQuotaTestUtil.java:105) ~[test-classes/:?] at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.testNamespaceClusterScopeQuota(TestClusterScopeQuotaThrottle.java:129) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.completeExceptionally(AsyncRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.tryScheduleRetry(AsyncRpcRetryingCaller.java:130) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:215) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] at org.apache.hadoop.hbase.client.ConnectionUtils$2.run(ConnectionUtils.java:625) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.quotas.RpcThrottlingException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
42 more Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.quotas.RpcThrottlingException: number of read requests exceeded - wait 10sec, 0ms at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwThrottlingException(RpcThrottlingException.java:133) at org.apache.hadoop.hbase.quotas.RpcThrottlingException.throwNumReadRequestsExceeded(RpcThrottlingException.java:99) at org.apache.hadoop.hbase.quotas.TimeBasedLimiter.checkQuota(TimeBasedLimiter.java:178) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkQuota(DefaultOperationQuota.java:121) at org.apache.hadoop.hbase.quotas.DefaultOperationQuota.checkBatchQuota(DefaultOperationQuota.java:97) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:243) at org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager.checkBatchQuota(RegionServerRpcQuotaManager.java:189) at org.apache.hadoop.hbase.regionserver.RSRpcServices.get(RSRpcServices.java:2490) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43504) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 32 more 2024-12-01T23:55:07,598 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:55:07,598 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:55:07,598 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733126066990 bypass), TestNs=QuotaState(ts=1733126066990 bypass)} 2024-12-01T23:55:07,598 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestQuotaAdmin0=QuotaState(ts=1733126066990 bypass), TestNs:TestTable=QuotaState(ts=1733126066990 bypass), TestQuotaAdmin2=QuotaState(ts=1733126066990 bypass), TestQuotaAdmin1=QuotaState(ts=1733126066990 bypass)} 2024-12-01T23:55:07,598 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {jenkins=UserQuotaState(ts=1733126066990 global-limiter [ default ])} 2024-12-01T23:55:07,598 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733126066990 bypass)} 2024-12-01T23:55:07,849 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T23:55:07,849 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(279): QuotaCache 2024-12-01T23:55:07,849 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(280): {default=QuotaState(ts=1733126066990 bypass), TestNs=QuotaState(ts=1733126066990 bypass)} 2024-12-01T23:55:07,849 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(281): {TestNs:TestTable=QuotaState(ts=1733126066990 bypass)} 2024-12-01T23:55:07,849 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(282): {} 2024-12-01T23:55:07,849 DEBUG [Time-limited test {}] quotas.ThrottleQuotaTestUtil(283): {all=QuotaState(ts=1733126066990 bypass)} 2024-12-01T23:55:07,860 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: quotas.TestClusterScopeQuotaThrottle#testNamespaceClusterScopeQuota Thread=281 (was 283), OpenFileDescriptor=527 (was 531), 
MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=410 (was 420), ProcessCount=11 (was 11), AvailableMemoryMB=6715 (was 6720) 2024-12-01T23:55:07,864 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin0 2024-12-01T23:55:07,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=22, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin0 2024-12-01T23:55:07,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-12-01T23:55:07,873 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097307872"}]},"ts":"1733097307872"} 2024-12-01T23:55:07,874 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=DISABLING in hbase:meta 2024-12-01T23:55:07,875 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin0 to state=DISABLING 2024-12-01T23:55:07,877 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin0}] 2024-12-01T23:55:07,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=3db8e2be75873790be5eaad9cf5be7a6, UNASSIGN}] 2024-12-01T23:55:07,884 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=3db8e2be75873790be5eaad9cf5be7a6, UNASSIGN 2024-12-01T23:55:07,886 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=3db8e2be75873790be5eaad9cf5be7a6, regionState=CLOSING, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:55:07,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin0, region=3db8e2be75873790be5eaad9cf5be7a6, UNASSIGN because future has completed 2024-12-01T23:55:07,889 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T23:55:07,889 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3db8e2be75873790be5eaad9cf5be7a6, server=579b0f681375,45401,1733097235193}] 2024-12-01T23:55:08,047 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(122): Close 3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:55:08,047 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T23:55:08,048 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1722): Closing 
3db8e2be75873790be5eaad9cf5be7a6, disabling compactions & flushes 2024-12-01T23:55:08,048 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1755): Closing region TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. 2024-12-01T23:55:08,048 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. 2024-12-01T23:55:08,048 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. after waiting 0 ms 2024-12-01T23:55:08,048 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. 2024-12-01T23:55:08,052 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(2902): Flushing 3db8e2be75873790be5eaad9cf5be7a6 1/1 column families, dataSize=374 B heapSize=1.45 KB 2024-12-01T23:55:08,123 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/.tmp/cf/e24210c0e2324fdf9e78cbfe19afacea is 38, key is row-0/cf:q/1733097307312/Put/seqid=0 2024-12-01T23:55:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-12-01T23:55:08,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741847_1023 (size=4967) 2024-12-01T23:55:08,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741847_1023 (size=4967) 2024-12-01T23:55:08,143 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=374 B at sequenceid=15 (bloomFilter=false), to=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/.tmp/cf/e24210c0e2324fdf9e78cbfe19afacea 2024-12-01T23:55:08,200 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/.tmp/cf/e24210c0e2324fdf9e78cbfe19afacea as hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/cf/e24210c0e2324fdf9e78cbfe19afacea 2024-12-01T23:55:08,213 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/cf/e24210c0e2324fdf9e78cbfe19afacea, entries=6, sequenceid=15, filesize=4.9 K 
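Note: the RpcThrottlingException retries logged above ("number of write requests exceeded" / "number of read requests exceeded") are produced by request-number throttle quotas. A minimal sketch of how such throttles are typically set and cleared through the HBase quota API is shown below; the table and namespace names are taken from this log, while the specific limits and the class name are illustrative assumptions, not the values used by this test.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
    import org.apache.hadoop.hbase.quotas.ThrottleType;

    public class ThrottleQuotaSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestQuotaAdmin0");
          // Cap the number of write RPCs per minute on the table (illustrative limit).
          admin.setQuota(QuotaSettingsFactory.throttleTable(
              table, ThrottleType.WRITE_NUMBER, 6, TimeUnit.MINUTES));
          // Cap the number of read RPCs per minute on the TestNs namespace (illustrative limit).
          admin.setQuota(QuotaSettingsFactory.throttleNamespace(
              "TestNs", ThrottleType.READ_NUMBER, 6, TimeUnit.MINUTES));
          // Remove both throttles once the scenario is done, so later requests are not rejected.
          admin.setQuota(QuotaSettingsFactory.unthrottleTable(table));
          admin.setQuota(QuotaSettingsFactory.unthrottleNamespace("TestNs"));
        }
      }
    }

Once such a limit is exceeded, the region server rejects further requests with RpcThrottlingException and a suggested wait interval, which is what the retry loops above keep hitting.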
2024-12-01T23:55:08,222 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(3140): Finished flush of dataSize ~374 B/374, heapSize ~1.44 KB/1472, currentSize=0 B/0 for 3db8e2be75873790be5eaad9cf5be7a6 in 168ms, sequenceid=15, compaction requested=false 2024-12-01T23:55:08,234 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/recovered.edits/18.seqid, newMaxSeqId=18, maxSeqId=1 2024-12-01T23:55:08,237 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1973): Closed TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. 2024-12-01T23:55:08,237 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1676): Region close journal for 3db8e2be75873790be5eaad9cf5be7a6: Waiting for close lock at 1733097308048Running coprocessor pre-close hooks at 1733097308048Disabling compacts and flushes for region at 1733097308048Disabling writes for close at 1733097308048Obtaining lock to block concurrent updates at 1733097308052 (+4 ms)Preparing flush snapshotting stores in 3db8e2be75873790be5eaad9cf5be7a6 at 1733097308052Finished memstore snapshotting TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6., syncing WAL and waiting on mvcc, flushsize=dataSize=374, getHeapSize=1472, getOffHeapSize=0, getCellsCount=11 at 1733097308061 (+9 ms)Flushing stores of TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6. at 1733097308062 (+1 ms)Flushing 3db8e2be75873790be5eaad9cf5be7a6/cf: creating writer at 1733097308065 (+3 ms)Flushing 3db8e2be75873790be5eaad9cf5be7a6/cf: appending metadata at 1733097308115 (+50 ms)Flushing 3db8e2be75873790be5eaad9cf5be7a6/cf: closing flushed file at 1733097308118 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@655c6c58: reopening flushed file at 1733097308198 (+80 ms)Finished flush of dataSize ~374 B/374, heapSize ~1.44 KB/1472, currentSize=0 B/0 for 3db8e2be75873790be5eaad9cf5be7a6 in 168ms, sequenceid=15, compaction requested=false at 1733097308222 (+24 ms)Writing region close event to WAL at 1733097308229 (+7 ms)Running coprocessor post-close hooks at 1733097308235 (+6 ms)Closed at 1733097308237 (+2 ms) 2024-12-01T23:55:08,240 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(157): Closed 3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:55:08,241 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=3db8e2be75873790be5eaad9cf5be7a6, regionState=CLOSED 2024-12-01T23:55:08,245 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=24, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3db8e2be75873790be5eaad9cf5be7a6, server=579b0f681375,45401,1733097235193 because future has completed 2024-12-01T23:55:08,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=24 2024-12-01T23:55:08,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=24, state=SUCCESS, hasLock=false; CloseRegionProcedure 3db8e2be75873790be5eaad9cf5be7a6, server=579b0f681375,45401,1733097235193 in 
360 msec 2024-12-01T23:55:08,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=24, resume processing ppid=23 2024-12-01T23:55:08,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=23, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin0, region=3db8e2be75873790be5eaad9cf5be7a6, UNASSIGN in 371 msec 2024-12-01T23:55:08,262 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-01T23:55:08,262 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin0 in 381 msec 2024-12-01T23:55:08,266 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097308266"}]},"ts":"1733097308266"} 2024-12-01T23:55:08,270 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin0, state=DISABLED in hbase:meta 2024-12-01T23:55:08,270 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin0 to state=DISABLED 2024-12-01T23:55:08,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin0 in 406 msec 2024-12-01T23:55:08,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=22 2024-12-01T23:55:08,643 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin0 completed 2024-12-01T23:55:08,645 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin0 2024-12-01T23:55:08,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=26, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin0 2024-12-01T23:55:08,651 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=26, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-12-01T23:55:08,655 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=26, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-12-01T23:55:08,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=26 2024-12-01T23:55:08,662 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:55:08,668 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/cf, FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/recovered.edits] 2024-12-01T23:55:08,677 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/cf/e24210c0e2324fdf9e78cbfe19afacea to hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/archive/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/cf/e24210c0e2324fdf9e78cbfe19afacea 2024-12-01T23:55:08,682 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/recovered.edits/18.seqid to hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/archive/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6/recovered.edits/18.seqid 2024-12-01T23:55:08,683 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin0/3db8e2be75873790be5eaad9cf5be7a6 2024-12-01T23:55:08,683 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin0 regions 2024-12-01T23:55:08,689 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=26, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-12-01T23:55:08,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45401 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-01T23:55:08,698 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin0 from hbase:meta 2024-12-01T23:55:08,702 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin0' descriptor. 2024-12-01T23:55:08,704 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=26, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-12-01T23:55:08,704 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin0' from region states. 2024-12-01T23:55:08,704 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733097308704"}]},"ts":"9223372036854775807"} 2024-12-01T23:55:08,707 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-01T23:55:08,707 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 3db8e2be75873790be5eaad9cf5be7a6, NAME => 'TestQuotaAdmin0,,1733097238113.3db8e2be75873790be5eaad9cf5be7a6.', STARTKEY => '', ENDKEY => ''}] 2024-12-01T23:55:08,708 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin0' as deleted. 
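The DISABLE/DELETE cycle recorded around these lines (DisableTableProcedure pid=22 and DeleteTableProcedure pid=26 for TestQuotaAdmin0, with analogous procedures for the other test tables further down) is driven by client-side admin calls. A minimal, hypothetical sketch of the equivalent synchronous Admin usage follows; it is not the test's actual code, and the connection setup and error handling are assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropQuotaTable {
      public static void main(String[] args) throws Exception {
        // Table name copied from the log above; connection config is assumed.
        TableName table = TableName.valueOf("TestQuotaAdmin0");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);  // master runs a DisableTableProcedure, as with pid=22 above
            }
            admin.deleteTable(table);     // master runs a DeleteTableProcedure, as with pid=26 above
          }
        }
      }
    }

The log itself shows the async variant (RawAsyncHBaseAdmin) being used by the test client, but the master-side procedure chain is the same either way.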
2024-12-01T23:55:08,708 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin0","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733097308708"}]},"ts":"9223372036854775807"} 2024-12-01T23:55:08,713 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin0 state from META 2024-12-01T23:55:08,714 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=26, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin0 2024-12-01T23:55:08,718 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin0 in 68 msec 2024-12-01T23:55:08,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=26 2024-12-01T23:55:08,914 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin0 2024-12-01T23:55:08,914 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin0 completed 2024-12-01T23:55:08,915 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin1 2024-12-01T23:55:08,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=27, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin1 2024-12-01T23:55:08,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=27 2024-12-01T23:55:08,920 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097308920"}]},"ts":"1733097308920"} 2024-12-01T23:55:08,925 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=DISABLING in hbase:meta 2024-12-01T23:55:08,925 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin1 to state=DISABLING 2024-12-01T23:55:08,926 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin1}] 2024-12-01T23:55:08,928 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=6bde2c94473bacf7f0b53f286324955b, UNASSIGN}] 2024-12-01T23:55:08,930 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=6bde2c94473bacf7f0b53f286324955b, UNASSIGN 2024-12-01T23:55:08,932 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=29 updating hbase:meta row=6bde2c94473bacf7f0b53f286324955b, regionState=CLOSING, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:55:08,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=28, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin1, region=6bde2c94473bacf7f0b53f286324955b, UNASSIGN because future has completed 2024-12-01T23:55:08,935 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T23:55:08,935 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=30, ppid=29, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6bde2c94473bacf7f0b53f286324955b, server=579b0f681375,45401,1733097235193}] 2024-12-01T23:55:09,089 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(122): Close 6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:55:09,089 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T23:55:09,089 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1722): Closing 6bde2c94473bacf7f0b53f286324955b, disabling compactions & flushes 2024-12-01T23:55:09,089 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1755): Closing region TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. 2024-12-01T23:55:09,089 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. 2024-12-01T23:55:09,089 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. after waiting 0 ms 2024-12-01T23:55:09,090 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. 2024-12-01T23:55:09,095 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin1/6bde2c94473bacf7f0b53f286324955b/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-01T23:55:09,096 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1973): Closed TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b. 
2024-12-01T23:55:09,097 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] regionserver.HRegion(1676): Region close journal for 6bde2c94473bacf7f0b53f286324955b: Waiting for close lock at 1733097309089Running coprocessor pre-close hooks at 1733097309089Disabling compacts and flushes for region at 1733097309089Disabling writes for close at 1733097309089Writing region close event to WAL at 1733097309090 (+1 ms)Running coprocessor post-close hooks at 1733097309096 (+6 ms)Closed at 1733097309096 2024-12-01T23:55:09,099 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=30}] handler.UnassignRegionHandler(157): Closed 6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:55:09,100 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=29 updating hbase:meta row=6bde2c94473bacf7f0b53f286324955b, regionState=CLOSED 2024-12-01T23:55:09,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=30, ppid=29, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6bde2c94473bacf7f0b53f286324955b, server=579b0f681375,45401,1733097235193 because future has completed 2024-12-01T23:55:09,106 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=29 2024-12-01T23:55:09,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=29, state=SUCCESS, hasLock=false; CloseRegionProcedure 6bde2c94473bacf7f0b53f286324955b, server=579b0f681375,45401,1733097235193 in 169 msec 2024-12-01T23:55:09,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=28 2024-12-01T23:55:09,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin1, region=6bde2c94473bacf7f0b53f286324955b, UNASSIGN in 178 msec 2024-12-01T23:55:09,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=27 2024-12-01T23:55:09,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=27, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin1 in 185 msec 2024-12-01T23:55:09,115 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097309115"}]},"ts":"1733097309115"} 2024-12-01T23:55:09,117 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin1, state=DISABLED in hbase:meta 2024-12-01T23:55:09,118 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin1 to state=DISABLED 2024-12-01T23:55:09,123 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin1 in 203 msec 2024-12-01T23:55:09,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=27 2024-12-01T23:55:09,172 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin1 completed 2024-12-01T23:55:09,173 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin1 2024-12-01T23:55:09,175 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin1 2024-12-01T23:55:09,176 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=31, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-12-01T23:55:09,178 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=31, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-12-01T23:55:09,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-01T23:55:09,181 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin1/6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:55:09,185 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin1/6bde2c94473bacf7f0b53f286324955b/cf, FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin1/6bde2c94473bacf7f0b53f286324955b/recovered.edits] 2024-12-01T23:55:09,196 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin1/6bde2c94473bacf7f0b53f286324955b/recovered.edits/4.seqid to hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/archive/data/default/TestQuotaAdmin1/6bde2c94473bacf7f0b53f286324955b/recovered.edits/4.seqid 2024-12-01T23:55:09,199 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin1/6bde2c94473bacf7f0b53f286324955b 2024-12-01T23:55:09,199 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin1 regions 2024-12-01T23:55:09,206 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=31, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-12-01T23:55:09,208 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin1 from hbase:meta 2024-12-01T23:55:09,211 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin1' descriptor. 2024-12-01T23:55:09,214 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=31, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-12-01T23:55:09,214 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin1' from region states. 
2024-12-01T23:55:09,214 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733097309214"}]},"ts":"9223372036854775807"} 2024-12-01T23:55:09,217 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-01T23:55:09,217 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 6bde2c94473bacf7f0b53f286324955b, NAME => 'TestQuotaAdmin1,,1733097239715.6bde2c94473bacf7f0b53f286324955b.', STARTKEY => '', ENDKEY => ''}] 2024-12-01T23:55:09,217 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin1' as deleted. 2024-12-01T23:55:09,218 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733097309217"}]},"ts":"9223372036854775807"} 2024-12-01T23:55:09,220 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin1 state from META 2024-12-01T23:55:09,222 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=31, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin1 2024-12-01T23:55:09,225 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin1 in 50 msec 2024-12-01T23:55:09,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-01T23:55:09,443 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin1 2024-12-01T23:55:09,443 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin1 completed 2024-12-01T23:55:09,444 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestQuotaAdmin2 2024-12-01T23:55:09,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestQuotaAdmin2 2024-12-01T23:55:09,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=32 2024-12-01T23:55:09,451 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097309451"}]},"ts":"1733097309451"} 2024-12-01T23:55:09,453 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=DISABLING in hbase:meta 2024-12-01T23:55:09,453 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestQuotaAdmin2 to state=DISABLING 2024-12-01T23:55:09,455 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin2}] 2024-12-01T23:55:09,458 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=TestQuotaAdmin2, region=1e69caa932f6b592705f1016b011510c, UNASSIGN}] 2024-12-01T23:55:09,460 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=1e69caa932f6b592705f1016b011510c, UNASSIGN 2024-12-01T23:55:09,461 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=1e69caa932f6b592705f1016b011510c, regionState=CLOSING, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:55:09,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestQuotaAdmin2, region=1e69caa932f6b592705f1016b011510c, UNASSIGN because future has completed 2024-12-01T23:55:09,465 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T23:55:09,465 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1e69caa932f6b592705f1016b011510c, server=579b0f681375,45401,1733097235193}] 2024-12-01T23:55:09,620 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 1e69caa932f6b592705f1016b011510c 2024-12-01T23:55:09,620 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T23:55:09,620 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 1e69caa932f6b592705f1016b011510c, disabling compactions & flushes 2024-12-01T23:55:09,620 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. 2024-12-01T23:55:09,620 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. 2024-12-01T23:55:09,621 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. after waiting 0 ms 2024-12-01T23:55:09,621 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. 2024-12-01T23:55:09,628 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin2/1e69caa932f6b592705f1016b011510c/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-01T23:55:09,629 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c. 
2024-12-01T23:55:09,630 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 1e69caa932f6b592705f1016b011510c: Waiting for close lock at 1733097309620Running coprocessor pre-close hooks at 1733097309620Disabling compacts and flushes for region at 1733097309620Disabling writes for close at 1733097309621 (+1 ms)Writing region close event to WAL at 1733097309621Running coprocessor post-close hooks at 1733097309629 (+8 ms)Closed at 1733097309629 2024-12-01T23:55:09,634 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 1e69caa932f6b592705f1016b011510c 2024-12-01T23:55:09,635 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=1e69caa932f6b592705f1016b011510c, regionState=CLOSED 2024-12-01T23:55:09,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1e69caa932f6b592705f1016b011510c, server=579b0f681375,45401,1733097235193 because future has completed 2024-12-01T23:55:09,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-12-01T23:55:09,647 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 1e69caa932f6b592705f1016b011510c, server=579b0f681375,45401,1733097235193 in 175 msec 2024-12-01T23:55:09,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=33 2024-12-01T23:55:09,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestQuotaAdmin2, region=1e69caa932f6b592705f1016b011510c, UNASSIGN in 188 msec 2024-12-01T23:55:09,656 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=33, resume processing ppid=32 2024-12-01T23:55:09,656 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestQuotaAdmin2 in 196 msec 2024-12-01T23:55:09,658 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097309658"}]},"ts":"1733097309658"} 2024-12-01T23:55:09,661 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestQuotaAdmin2, state=DISABLED in hbase:meta 2024-12-01T23:55:09,661 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestQuotaAdmin2 to state=DISABLED 2024-12-01T23:55:09,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestQuotaAdmin2 in 218 msec 2024-12-01T23:55:09,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=32 2024-12-01T23:55:09,703 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:TestQuotaAdmin2 completed 2024-12-01T23:55:09,703 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestQuotaAdmin2 2024-12-01T23:55:09,705 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin2 2024-12-01T23:55:09,706 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-12-01T23:55:09,707 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-12-01T23:55:09,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-12-01T23:55:09,710 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin2/1e69caa932f6b592705f1016b011510c 2024-12-01T23:55:09,713 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin2/1e69caa932f6b592705f1016b011510c/cf, FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin2/1e69caa932f6b592705f1016b011510c/recovered.edits] 2024-12-01T23:55:09,720 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin2/1e69caa932f6b592705f1016b011510c/recovered.edits/4.seqid to hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/archive/data/default/TestQuotaAdmin2/1e69caa932f6b592705f1016b011510c/recovered.edits/4.seqid 2024-12-01T23:55:09,725 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/default/TestQuotaAdmin2/1e69caa932f6b592705f1016b011510c 2024-12-01T23:55:09,725 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestQuotaAdmin2 regions 2024-12-01T23:55:09,728 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-12-01T23:55:09,732 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestQuotaAdmin2 from hbase:meta 2024-12-01T23:55:09,734 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestQuotaAdmin2' descriptor. 2024-12-01T23:55:09,736 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-12-01T23:55:09,736 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestQuotaAdmin2' from region states. 
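The HFileArchiver entries above show that dropping a table does not erase its files outright: each region's store files and recovered.edits are first copied under the parallel archive/ tree, and only then is the region directory removed from data/. A small hypothetical check of that layout with the Hadoop FileSystem API is sketched below; the NameNode URI and region path are simply the values copied from this log, and a real check would take them from the cluster configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CheckArchivedRegion {
      public static void main(String[] args) throws Exception {
        // Root dir and region path are illustrative values taken from the log above.
        Path root = new Path("hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18");
        String region = "data/default/TestQuotaAdmin2/1e69caa932f6b592705f1016b011510c";
        FileSystem fs = FileSystem.get(root.toUri(), new Configuration());
        // After the delete procedure the region dir should be gone from data/ ...
        System.out.println("still in data/   : " + fs.exists(new Path(root, region)));
        // ... and its contents should have been moved under archive/.
        System.out.println("moved to archive/: " + fs.exists(new Path(root, "archive/" + region)));
      }
    }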
2024-12-01T23:55:09,736 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733097309736"}]},"ts":"9223372036854775807"} 2024-12-01T23:55:09,738 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-01T23:55:09,738 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 1e69caa932f6b592705f1016b011510c, NAME => 'TestQuotaAdmin2,,1733097240516.1e69caa932f6b592705f1016b011510c.', STARTKEY => '', ENDKEY => ''}] 2024-12-01T23:55:09,738 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestQuotaAdmin2' as deleted. 2024-12-01T23:55:09,738 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestQuotaAdmin2","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733097309738"}]},"ts":"9223372036854775807"} 2024-12-01T23:55:09,740 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table TestQuotaAdmin2 state from META 2024-12-01T23:55:09,741 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestQuotaAdmin2 2024-12-01T23:55:09,743 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestQuotaAdmin2 in 39 msec 2024-12-01T23:55:09,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-12-01T23:55:09,973 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestQuotaAdmin2 2024-12-01T23:55:09,973 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:TestQuotaAdmin2 completed 2024-12-01T23:55:09,973 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable TestNs:TestTable 2024-12-01T23:55:09,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=TestNs:TestTable 2024-12-01T23:55:09,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-01T23:55:09,978 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097309977"}]},"ts":"1733097309977"} 2024-12-01T23:55:09,980 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=DISABLING in hbase:meta 2024-12-01T23:55:09,980 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestNs:TestTable to state=DISABLING 2024-12-01T23:55:09,981 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=TestNs:TestTable}] 2024-12-01T23:55:09,982 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=TestNs:TestTable, region=acce7aef1031d5e94b21fdd93f0aa04d, UNASSIGN}, {pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=b7dc35984ce3d77666ebbdeb398352af, UNASSIGN}] 2024-12-01T23:55:09,983 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=b7dc35984ce3d77666ebbdeb398352af, UNASSIGN 2024-12-01T23:55:09,983 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=acce7aef1031d5e94b21fdd93f0aa04d, UNASSIGN 2024-12-01T23:55:09,984 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=39 updating hbase:meta row=acce7aef1031d5e94b21fdd93f0aa04d, regionState=CLOSING, regionLocation=579b0f681375,45401,1733097235193 2024-12-01T23:55:09,984 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=b7dc35984ce3d77666ebbdeb398352af, regionState=CLOSING, regionLocation=579b0f681375,44651,1733097235352 2024-12-01T23:55:09,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=acce7aef1031d5e94b21fdd93f0aa04d, UNASSIGN because future has completed 2024-12-01T23:55:09,988 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T23:55:09,988 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=41, ppid=39, state=RUNNABLE, hasLock=false; CloseRegionProcedure acce7aef1031d5e94b21fdd93f0aa04d, server=579b0f681375,45401,1733097235193}] 2024-12-01T23:55:09,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=38, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestNs:TestTable, region=b7dc35984ce3d77666ebbdeb398352af, UNASSIGN because future has completed 2024-12-01T23:55:09,995 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-01T23:55:09,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure b7dc35984ce3d77666ebbdeb398352af, server=579b0f681375,44651,1733097235352}] 2024-12-01T23:55:10,147 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(122): Close acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:55:10,147 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T23:55:10,148 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1722): Closing acce7aef1031d5e94b21fdd93f0aa04d, disabling compactions & flushes 2024-12-01T23:55:10,148 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1755): 
Closing region TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. 2024-12-01T23:55:10,148 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. 2024-12-01T23:55:10,148 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. after waiting 0 ms 2024-12-01T23:55:10,148 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. 2024-12-01T23:55:10,150 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:55:10,150 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-01T23:55:10,150 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing b7dc35984ce3d77666ebbdeb398352af, disabling compactions & flushes 2024-12-01T23:55:10,150 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. 2024-12-01T23:55:10,150 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. 2024-12-01T23:55:10,150 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. after waiting 0 ms 2024-12-01T23:55:10,150 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. 2024-12-01T23:55:10,155 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/acce7aef1031d5e94b21fdd93f0aa04d/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-01T23:55:10,157 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1973): Closed TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d. 
2024-12-01T23:55:10,157 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1676): Region close journal for acce7aef1031d5e94b21fdd93f0aa04d: Waiting for close lock at 1733097310148Running coprocessor pre-close hooks at 1733097310148Disabling compacts and flushes for region at 1733097310148Disabling writes for close at 1733097310148Writing region close event to WAL at 1733097310148Running coprocessor post-close hooks at 1733097310157 (+9 ms)Closed at 1733097310157 2024-12-01T23:55:10,159 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(157): Closed acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:55:10,160 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=39 updating hbase:meta row=acce7aef1031d5e94b21fdd93f0aa04d, regionState=CLOSED 2024-12-01T23:55:10,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE, hasLock=false; CloseRegionProcedure acce7aef1031d5e94b21fdd93f0aa04d, server=579b0f681375,45401,1733097235193 because future has completed 2024-12-01T23:55:10,162 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/b7dc35984ce3d77666ebbdeb398352af/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-01T23:55:10,163 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af. 2024-12-01T23:55:10,164 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for b7dc35984ce3d77666ebbdeb398352af: Waiting for close lock at 1733097310150Running coprocessor pre-close hooks at 1733097310150Disabling compacts and flushes for region at 1733097310150Disabling writes for close at 1733097310150Writing region close event to WAL at 1733097310153 (+3 ms)Running coprocessor post-close hooks at 1733097310163 (+10 ms)Closed at 1733097310163 2024-12-01T23:55:10,166 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=41, resume processing ppid=39 2024-12-01T23:55:10,166 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:55:10,166 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; CloseRegionProcedure acce7aef1031d5e94b21fdd93f0aa04d, server=579b0f681375,45401,1733097235193 in 175 msec 2024-12-01T23:55:10,166 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=b7dc35984ce3d77666ebbdeb398352af, regionState=CLOSED 2024-12-01T23:55:10,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=acce7aef1031d5e94b21fdd93f0aa04d, UNASSIGN in 184 msec 2024-12-01T23:55:10,171 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure b7dc35984ce3d77666ebbdeb398352af, 
server=579b0f681375,44651,1733097235352 because future has completed 2024-12-01T23:55:10,175 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=40 2024-12-01T23:55:10,175 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure b7dc35984ce3d77666ebbdeb398352af, server=579b0f681375,44651,1733097235352 in 177 msec 2024-12-01T23:55:10,177 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=38 2024-12-01T23:55:10,177 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=38, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestNs:TestTable, region=b7dc35984ce3d77666ebbdeb398352af, UNASSIGN in 193 msec 2024-12-01T23:55:10,181 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=38, resume processing ppid=37 2024-12-01T23:55:10,181 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=37, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=TestNs:TestTable in 197 msec 2024-12-01T23:55:10,183 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733097310183"}]},"ts":"1733097310183"} 2024-12-01T23:55:10,185 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestNs:TestTable, state=DISABLED in hbase:meta 2024-12-01T23:55:10,185 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestNs:TestTable to state=DISABLED 2024-12-01T23:55:10,187 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DisableTableProcedure table=TestNs:TestTable in 213 msec 2024-12-01T23:55:10,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-01T23:55:10,232 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: TestNs:TestTable completed 2024-12-01T23:55:10,232 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete TestNs:TestTable 2024-12-01T23:55:10,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=43, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=TestNs:TestTable 2024-12-01T23:55:10,264 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=43, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-12-01T23:55:10,266 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=43, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-12-01T23:55:10,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=43 2024-12-01T23:55:10,275 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:55:10,276 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:55:10,281 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/b7dc35984ce3d77666ebbdeb398352af/cf, FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/b7dc35984ce3d77666ebbdeb398352af/recovered.edits] 2024-12-01T23:55:10,281 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/acce7aef1031d5e94b21fdd93f0aa04d/cf, FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/acce7aef1031d5e94b21fdd93f0aa04d/recovered.edits] 2024-12-01T23:55:10,295 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/acce7aef1031d5e94b21fdd93f0aa04d/recovered.edits/4.seqid to hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/archive/data/TestNs/TestTable/acce7aef1031d5e94b21fdd93f0aa04d/recovered.edits/4.seqid 2024-12-01T23:55:10,296 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/b7dc35984ce3d77666ebbdeb398352af/recovered.edits/4.seqid to hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/archive/data/TestNs/TestTable/b7dc35984ce3d77666ebbdeb398352af/recovered.edits/4.seqid 2024-12-01T23:55:10,296 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/acce7aef1031d5e94b21fdd93f0aa04d 2024-12-01T23:55:10,297 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/TestNs/TestTable/b7dc35984ce3d77666ebbdeb398352af 2024-12-01T23:55:10,297 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestNs:TestTable regions 2024-12-01T23:55:10,302 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=43, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-12-01T23:55:10,305 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of TestNs:TestTable from hbase:meta 2024-12-01T23:55:10,308 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestNs:TestTable' descriptor. 2024-12-01T23:55:10,310 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=43, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-12-01T23:55:10,310 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestNs:TestTable' from region states. 
2024-12-01T23:55:10,311 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733097310310"}]},"ts":"9223372036854775807"} 2024-12-01T23:55:10,311 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733097310310"}]},"ts":"9223372036854775807"} 2024-12-01T23:55:10,315 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-01T23:55:10,315 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => acce7aef1031d5e94b21fdd93f0aa04d, NAME => 'TestNs:TestTable,,1733097241623.acce7aef1031d5e94b21fdd93f0aa04d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => b7dc35984ce3d77666ebbdeb398352af, NAME => 'TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af.', STARTKEY => '1', ENDKEY => ''}] 2024-12-01T23:55:10,316 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestNs:TestTable' as deleted. 2024-12-01T23:55:10,316 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"TestNs:TestTable","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733097310316"}]},"ts":"9223372036854775807"} 2024-12-01T23:55:10,320 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table TestNs:TestTable state from META 2024-12-01T23:55:10,321 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=43, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=TestNs:TestTable 2024-12-01T23:55:10,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, state=SUCCESS, hasLock=false; DeleteTableProcedure table=TestNs:TestTable in 88 msec 2024-12-01T23:55:10,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=43 2024-12-01T23:55:10,533 DEBUG [RPCClient-NioEventLoopGroup-5-4 {}] client.AsyncRegionLocator(219): Clear meta cache for TestNs:TestTable 2024-12-01T23:55:10,533 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: TestNs:TestTable completed 2024-12-01T23:55:10,537 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.HMaster$20(3601): Client=jenkins//172.17.0.2 delete TestNs 2024-12-01T23:55:10,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_NAMESPACE_PREPARE, hasLock=false; DeleteNamespaceProcedure, namespace=TestNs 2024-12-01T23:55:10,544 INFO [PEWorker-5 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_PREPARE, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-12-01T23:55:10,546 INFO [PEWorker-5 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_DELETE_FROM_NS_TABLE, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-12-01T23:55:10,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-01T23:55:10,549 INFO [PEWorker-5 {}] procedure.DeleteNamespaceProcedure(67): pid=44, 
state=RUNNABLE:DELETE_NAMESPACE_DELETE_DIRECTORIES, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-12-01T23:55:10,552 INFO [PEWorker-5 {}] procedure.DeleteNamespaceProcedure(67): pid=44, state=RUNNABLE:DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA, hasLock=true; DeleteNamespaceProcedure, namespace=TestNs 2024-12-01T23:55:10,553 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteNamespaceProcedure, namespace=TestNs in 15 msec 2024-12-01T23:55:10,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40611 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-01T23:55:10,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-01T23:55:10,803 INFO [RPCClient-NioEventLoopGroup-5-4 {}] client.RawAsyncHBaseAdmin$NamespaceProcedureBiConsumer(2745): Operation: DELETE_NAMESPACE, Namespace: TestNs completed 2024-12-01T23:55:10,803 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-01T23:55:10,803 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.tearDownAfterClass(TestClusterScopeQuotaThrottle.java:107) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:55:10,809 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:55:10,810 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:55:10,810 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T23:55:10,810 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T23:55:10,810 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1484730631, stopped=false 2024-12-01T23:55:10,811 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.quotas.MasterQuotasObserver 2024-12-01T23:55:10,811 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=579b0f681375,40611,1733097234530 2024-12-01T23:55:10,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T23:55:10,877 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T23:55:10,877 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:55:10,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:55:10,877 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T23:55:10,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T23:55:10,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:55:10,877 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
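The DeleteTableProcedure (pid=43) and DeleteNamespaceProcedure (pid=44) recorded above are the master-side procedures triggered by ordinary admin requests from the test client. A minimal sketch of client calls that would produce that sequence, assuming a fresh connection to this cluster; the table and namespace names come from the log, everything else is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTableSketch {
      public static void main(String[] args) throws IOException {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("TestNs:TestTable");
          if (admin.tableExists(table)) {
            admin.disableTable(table);   // a table must be disabled before it can be deleted
            admin.deleteTable(table);    // runs DeleteTableProcedure on the master
          }
          admin.deleteNamespace("TestNs"); // runs DeleteNamespaceProcedure on the master
        }
      }
    }

The repeated "Checking to see if procedure is done pid=43" / "pid=44" entries above are the client polling for exactly these operations to complete.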
2024-12-01T23:55:10,877 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.quotas.TestClusterScopeQuotaThrottle.tearDownAfterClass(TestClusterScopeQuotaThrottle.java:107) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:55:10,878 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:55:10,878 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:55:10,878 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:55:10,879 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '579b0f681375,45401,1733097235193' ***** 2024-12-01T23:55:10,879 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T23:55:10,879 INFO [RS:0;579b0f681375:45401 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T23:55:10,879 INFO [RS:0;579b0f681375:45401 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
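Both call stacks end in TestClusterScopeQuotaThrottle.tearDownAfterClass, i.e. the JUnit @AfterClass hook that shuts the mini cluster down. A minimal sketch of that teardown shape, assuming JUnit 4 as in the stack traces; the class and field names and the setup body are assumptions, not taken from the actual test source:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterTeardownSketch {
      // Assumed field name; the real test keeps its own HBaseTestingUtil instance.
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        UTIL.startMiniCluster();   // the run above actually has two region servers (RS:0 and RS:1)
      }

      @AfterClass
      public static void tearDownAfterClass() throws Exception {
        // Closes the shared async connection, stops the region servers and the
        // master, then the backing HDFS mini cluster: the sequence logged above.
        UTIL.shutdownMiniCluster();
      }
    }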
2024-12-01T23:55:10,880 INFO [RS:0;579b0f681375:45401 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T23:55:10,880 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T23:55:10,880 INFO [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(959): stopping server 579b0f681375,45401,1733097235193 2024-12-01T23:55:10,880 INFO [RS:0;579b0f681375:45401 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T23:55:10,880 INFO [RS:0;579b0f681375:45401 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;579b0f681375:45401. 2024-12-01T23:55:10,880 DEBUG [RS:0;579b0f681375:45401 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:55:10,880 DEBUG [RS:0;579b0f681375:45401 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:55:10,880 INFO [RS:0;579b0f681375:45401 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T23:55:10,880 INFO [RS:0;579b0f681375:45401 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T23:55:10,880 INFO [RS:0;579b0f681375:45401 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
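The ZKWatcher entries above show every process seeing the /hbase/running znode disappear and immediately re-arming a watch on it; deletion of that znode is how cluster shutdown is broadcast. A stand-alone sketch of the same watch pattern with the plain ZooKeeper client, using the quorum address from the log; this is not HBase's own ZKWatcher, just an illustration of the mechanism:

    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Quorum taken from the log lines above; /hbase is the base znode.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61873", 30_000, event -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && "/hbase/running".equals(event.getPath())) {
            System.out.println("/hbase/running deleted: cluster shutdown requested");
          }
        });
        // exists() checks the znode and arms a one-shot watch even if it is absent,
        // hence "Set watcher on znode that does not yet exist" in the log.
        zk.exists("/hbase/running", true);
        Thread.sleep(60_000);   // keep the sketch alive long enough to see the event
        zk.close();
      }
    }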
2024-12-01T23:55:10,881 INFO [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-01T23:55:10,881 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '579b0f681375,44651,1733097235352' ***** 2024-12-01T23:55:10,881 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T23:55:10,881 INFO [RS:1;579b0f681375:44651 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T23:55:10,881 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T23:55:10,881 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T23:55:10,881 INFO [RS:1;579b0f681375:44651 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T23:55:10,881 INFO [RS:1;579b0f681375:44651 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T23:55:10,881 INFO [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(3091): Received CLOSE for 67f67293b31d223687049a16b9d906fa 2024-12-01T23:55:10,881 INFO [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(959): stopping server 579b0f681375,44651,1733097235352 2024-12-01T23:55:10,881 INFO [RS:1;579b0f681375:44651 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T23:55:10,882 INFO [RS:1;579b0f681375:44651 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;579b0f681375:44651. 2024-12-01T23:55:10,882 DEBUG [RS:1;579b0f681375:44651 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T23:55:10,882 DEBUG [RS:1;579b0f681375:44651 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:55:10,882 INFO [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-01T23:55:10,882 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 67f67293b31d223687049a16b9d906fa, disabling compactions & flushes 2024-12-01T23:55:10,882 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. 2024-12-01T23:55:10,882 DEBUG [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(1325): Online Regions={67f67293b31d223687049a16b9d906fa=hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa.} 2024-12-01T23:55:10,882 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. 2024-12-01T23:55:10,882 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. after waiting 0 ms 2024-12-01T23:55:10,882 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. 2024-12-01T23:55:10,882 DEBUG [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(1351): Waiting on 67f67293b31d223687049a16b9d906fa 2024-12-01T23:55:10,882 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 67f67293b31d223687049a16b9d906fa 2/2 column families, dataSize=626 B heapSize=2.13 KB 2024-12-01T23:55:10,882 INFO [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-01T23:55:10,883 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-01T23:55:10,883 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-01T23:55:10,884 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T23:55:10,884 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T23:55:10,884 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T23:55:10,884 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T23:55:10,884 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T23:55:10,884 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=13.33 KB heapSize=24.55 KB 2024-12-01T23:55:10,911 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/.tmp/info/e6e14a7a0b224941a7fb8d04bedbf5b5 is 135, key is hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa./info:regioninfo/1733097237978/Put/seqid=0 2024-12-01T23:55:10,911 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa/.tmp/q/503965605ad7478bac0f5cb8035a11da is 66, key is u.jenkins/q:s.default:/1733097243658/Put/seqid=0 2024-12-01T23:55:10,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741848_1024 (size=5347) 2024-12-01T23:55:10,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741848_1024 (size=5347) 2024-12-01T23:55:10,926 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=527 B at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa/.tmp/q/503965605ad7478bac0f5cb8035a11da 2024-12-01T23:55:10,926 INFO [regionserver/579b0f681375:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:55:10,926 INFO [regionserver/579b0f681375:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:55:10,936 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 503965605ad7478bac0f5cb8035a11da 2024-12-01T23:55:10,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741849_1025 (size=7362) 2024-12-01T23:55:10,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741849_1025 (size=7362) 2024-12-01T23:55:10,955 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa/.tmp/u/1c96db1bacaa4e55bbd7d8b4f644c658 is 43, key is t.TestNs:TestTable/u:/1733097306027/DeleteFamily/seqid=0 2024-12-01T23:55:10,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741850_1026 (size=5219) 2024-12-01T23:55:10,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741850_1026 (size=5219) 2024-12-01T23:55:11,083 DEBUG [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(1351): Waiting on 67f67293b31d223687049a16b9d906fa 2024-12-01T23:55:11,083 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-01T23:55:11,283 DEBUG [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(1351): Waiting on 67f67293b31d223687049a16b9d906fa 2024-12-01T23:55:11,283 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-01T23:55:11,348 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.80 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/.tmp/info/e6e14a7a0b224941a7fb8d04bedbf5b5 2024-12-01T23:55:11,367 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=99 B at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa/.tmp/u/1c96db1bacaa4e55bbd7d8b4f644c658 2024-12-01T23:55:11,379 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/.tmp/ns/e95ce363b73b40a6ba08d32dca98f1c5 is 92, key is TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af./ns:/1733097310302/DeleteFamily/seqid=0 2024-12-01T23:55:11,381 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1c96db1bacaa4e55bbd7d8b4f644c658 2024-12-01T23:55:11,383 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa/.tmp/q/503965605ad7478bac0f5cb8035a11da as hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa/q/503965605ad7478bac0f5cb8035a11da 2024-12-01T23:55:11,394 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 503965605ad7478bac0f5cb8035a11da 2024-12-01T23:55:11,394 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa/q/503965605ad7478bac0f5cb8035a11da, entries=5, sequenceid=15, filesize=5.2 K 2024-12-01T23:55:11,396 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa/.tmp/u/1c96db1bacaa4e55bbd7d8b4f644c658 as hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa/u/1c96db1bacaa4e55bbd7d8b4f644c658 2024-12-01T23:55:11,415 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1c96db1bacaa4e55bbd7d8b4f644c658 2024-12-01T23:55:11,415 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa/u/1c96db1bacaa4e55bbd7d8b4f644c658, entries=3, sequenceid=15, filesize=5.1 K 2024-12-01T23:55:11,417 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~626 B/626, heapSize ~2.09 KB/2144, currentSize=0 B/0 for 67f67293b31d223687049a16b9d906fa in 535ms, sequenceid=15, compaction requested=false 2024-12-01T23:55:11,424 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741851_1027 (size=5710) 2024-12-01T23:55:11,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741851_1027 (size=5710) 2024-12-01T23:55:11,426 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=572 B at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/.tmp/ns/e95ce363b73b40a6ba08d32dca98f1c5 2024-12-01T23:55:11,444 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/quota/67f67293b31d223687049a16b9d906fa/recovered.edits/18.seqid, newMaxSeqId=18, maxSeqId=1 2024-12-01T23:55:11,445 INFO [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. 2024-12-01T23:55:11,445 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 67f67293b31d223687049a16b9d906fa: Waiting for close lock at 1733097310882Running coprocessor pre-close hooks at 1733097310882Disabling compacts and flushes for region at 1733097310882Disabling writes for close at 1733097310882Obtaining lock to block concurrent updates at 1733097310882Preparing flush snapshotting stores in 67f67293b31d223687049a16b9d906fa at 1733097310882Finished memstore snapshotting hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa., syncing WAL and waiting on mvcc, flushsize=dataSize=626, getHeapSize=2144, getOffHeapSize=0, getCellsCount=14 at 1733097310883 (+1 ms)Flushing stores of hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. at 1733097310883Flushing 67f67293b31d223687049a16b9d906fa/q: creating writer at 1733097310884 (+1 ms)Flushing 67f67293b31d223687049a16b9d906fa/q: appending metadata at 1733097310905 (+21 ms)Flushing 67f67293b31d223687049a16b9d906fa/q: closing flushed file at 1733097310905Flushing 67f67293b31d223687049a16b9d906fa/u: creating writer at 1733097310937 (+32 ms)Flushing 67f67293b31d223687049a16b9d906fa/u: appending metadata at 1733097310953 (+16 ms)Flushing 67f67293b31d223687049a16b9d906fa/u: closing flushed file at 1733097310953Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6aad713a: reopening flushed file at 1733097311381 (+428 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f6edd38: reopening flushed file at 1733097311394 (+13 ms)Finished flush of dataSize ~626 B/626, heapSize ~2.09 KB/2144, currentSize=0 B/0 for 67f67293b31d223687049a16b9d906fa in 535ms, sequenceid=15, compaction requested=false at 1733097311417 (+23 ms)Writing region close event to WAL at 1733097311439 (+22 ms)Running coprocessor post-close hooks at 1733097311445 (+6 ms)Closed at 1733097311445 2024-12-01T23:55:11,445 DEBUG [RS_CLOSE_REGION-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:quota,,1733097237538.67f67293b31d223687049a16b9d906fa. 
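The close of hbase:quota above flushes its two column families (q and u) to temporary HFiles, commits them into the region's q/ and u/ directories, and records the final sequence id in recovered.edits. Outside of a shutdown, the same memstore flush can be requested explicitly through the Admin API; a hypothetical fragment, with only the table name taken from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Hypothetical: ask the region servers to flush the quota table's memstores.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("hbase:quota"));
    }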
2024-12-01T23:55:11,469 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/.tmp/rep_barrier/486d3ae7a27a4dffa8517fba440b4cc1 is 101, key is TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af./rep_barrier:/1733097310302/DeleteFamily/seqid=0 2024-12-01T23:55:11,483 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-01T23:55:11,483 INFO [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(976): stopping server 579b0f681375,44651,1733097235352; all regions closed. 2024-12-01T23:55:11,483 DEBUG [RS:1;579b0f681375:44651 {}] quotas.QuotaCache(122): Stopping QuotaRefresherChore chore. 2024-12-01T23:55:11,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741852_1028 (size=5823) 2024-12-01T23:55:11,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741852_1028 (size=5823) 2024-12-01T23:55:11,488 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=515 B at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/.tmp/rep_barrier/486d3ae7a27a4dffa8517fba440b4cc1 2024-12-01T23:55:11,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741834_1010 (size=3112) 2024-12-01T23:55:11,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741834_1010 (size=3112) 2024-12-01T23:55:11,500 DEBUG [RS:1;579b0f681375:44651 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/oldWALs 2024-12-01T23:55:11,500 INFO [RS:1;579b0f681375:44651 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 579b0f681375%2C44651%2C1733097235352:(num 1733097236920) 2024-12-01T23:55:11,500 DEBUG [RS:1;579b0f681375:44651 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:55:11,500 INFO [RS:1;579b0f681375:44651 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:55:11,500 INFO [RS:1;579b0f681375:44651 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T23:55:11,503 INFO [RS:1;579b0f681375:44651 {}] hbase.ChoreService(370): Chore service for: regionserver/579b0f681375:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T23:55:11,503 INFO [regionserver/579b0f681375:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T23:55:11,507 INFO [RS:1;579b0f681375:44651 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T23:55:11,507 INFO [RS:1;579b0f681375:44651 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T23:55:11,507 INFO [RS:1;579b0f681375:44651 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-01T23:55:11,507 INFO [RS:1;579b0f681375:44651 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T23:55:11,509 INFO [RS:1;579b0f681375:44651 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44651 2024-12-01T23:55:11,527 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/.tmp/table/78abc22f654e4d30ac6fc132649d1714 is 95, key is TestNs:TestTable,1,1733097241623.b7dc35984ce3d77666ebbdeb398352af./table:/1733097310302/DeleteFamily/seqid=0 2024-12-01T23:55:11,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741853_1029 (size=5966) 2024-12-01T23:55:11,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741853_1029 (size=5966) 2024-12-01T23:55:11,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T23:55:11,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/579b0f681375,44651,1733097235352 2024-12-01T23:55:11,577 INFO [RS:1;579b0f681375:44651 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T23:55:11,578 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [579b0f681375,44651,1733097235352] 2024-12-01T23:55:11,593 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/579b0f681375,44651,1733097235352 already deleted, retry=false 2024-12-01T23:55:11,593 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 579b0f681375,44651,1733097235352 expired; onlineServers=1 2024-12-01T23:55:11,683 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-01T23:55:11,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:55:11,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44651-0x101949ab0000002, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:55:11,686 INFO [RS:1;579b0f681375:44651 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T23:55:11,686 INFO [RS:1;579b0f681375:44651 {}] regionserver.HRegionServer(1031): Exiting; stopping=579b0f681375,44651,1733097235352; zookeeper connection closed. 
2024-12-01T23:55:11,686 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@27b671a8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@27b671a8 2024-12-01T23:55:11,800 INFO [regionserver/579b0f681375:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-01T23:55:11,807 INFO [regionserver/579b0f681375:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: SpaceQuotaRefresherChore was stopped 2024-12-01T23:55:11,807 INFO [regionserver/579b0f681375:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-01T23:55:11,884 DEBUG [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-01T23:55:11,940 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.47 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/.tmp/table/78abc22f654e4d30ac6fc132649d1714 2024-12-01T23:55:11,949 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/.tmp/info/e6e14a7a0b224941a7fb8d04bedbf5b5 as hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/info/e6e14a7a0b224941a7fb8d04bedbf5b5 2024-12-01T23:55:11,958 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/info/e6e14a7a0b224941a7fb8d04bedbf5b5, entries=21, sequenceid=65, filesize=7.2 K 2024-12-01T23:55:11,960 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/.tmp/ns/e95ce363b73b40a6ba08d32dca98f1c5 as hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/ns/e95ce363b73b40a6ba08d32dca98f1c5 2024-12-01T23:55:11,976 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/ns/e95ce363b73b40a6ba08d32dca98f1c5, entries=8, sequenceid=65, filesize=5.6 K 2024-12-01T23:55:11,978 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/.tmp/rep_barrier/486d3ae7a27a4dffa8517fba440b4cc1 as hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/rep_barrier/486d3ae7a27a4dffa8517fba440b4cc1 2024-12-01T23:55:11,988 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/rep_barrier/486d3ae7a27a4dffa8517fba440b4cc1, entries=6, sequenceid=65, filesize=5.7 K 
2024-12-01T23:55:11,989 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/.tmp/table/78abc22f654e4d30ac6fc132649d1714 as hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/table/78abc22f654e4d30ac6fc132649d1714 2024-12-01T23:55:11,999 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/table/78abc22f654e4d30ac6fc132649d1714, entries=12, sequenceid=65, filesize=5.8 K 2024-12-01T23:55:12,000 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~13.33 KB/13653, heapSize ~24.48 KB/25072, currentSize=0 B/0 for 1588230740 in 1116ms, sequenceid=65, compaction requested=false 2024-12-01T23:55:12,020 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/data/hbase/meta/1588230740/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-12-01T23:55:12,023 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T23:55:12,023 INFO [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T23:55:12,023 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733097310884Running coprocessor pre-close hooks at 1733097310884Disabling compacts and flushes for region at 1733097310884Disabling writes for close at 1733097310884Obtaining lock to block concurrent updates at 1733097310884Preparing flush snapshotting stores in 1588230740 at 1733097310884Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=13653, getHeapSize=25072, getOffHeapSize=0, getCellsCount=139 at 1733097310885 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733097310885Flushing 1588230740/info: creating writer at 1733097310885Flushing 1588230740/info: appending metadata at 1733097310907 (+22 ms)Flushing 1588230740/info: closing flushed file at 1733097310907Flushing 1588230740/ns: creating writer at 1733097311357 (+450 ms)Flushing 1588230740/ns: appending metadata at 1733097311378 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1733097311378Flushing 1588230740/rep_barrier: creating writer at 1733097311443 (+65 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733097311468 (+25 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733097311468Flushing 1588230740/table: creating writer at 1733097311504 (+36 ms)Flushing 1588230740/table: appending metadata at 1733097311526 (+22 ms)Flushing 1588230740/table: closing flushed file at 1733097311526Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1686245d: reopening flushed file at 1733097311948 (+422 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@395b4cca: reopening flushed 
file at 1733097311959 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a706239: reopening flushed file at 1733097311976 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48456747: reopening flushed file at 1733097311988 (+12 ms)Finished flush of dataSize ~13.33 KB/13653, heapSize ~24.48 KB/25072, currentSize=0 B/0 for 1588230740 in 1116ms, sequenceid=65, compaction requested=false at 1733097312001 (+13 ms)Writing region close event to WAL at 1733097312009 (+8 ms)Running coprocessor post-close hooks at 1733097312022 (+13 ms)Closed at 1733097312023 (+1 ms) 2024-12-01T23:55:12,023 DEBUG [RS_CLOSE_META-regionserver/579b0f681375:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-01T23:55:12,084 INFO [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(976): stopping server 579b0f681375,45401,1733097235193; all regions closed. 2024-12-01T23:55:12,084 DEBUG [RS:0;579b0f681375:45401 {}] quotas.QuotaCache(122): Stopping QuotaRefresherChore chore. 2024-12-01T23:55:12,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741835_1011 (size=17505) 2024-12-01T23:55:12,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741835_1011 (size=17505) 2024-12-01T23:55:12,091 DEBUG [RS:0;579b0f681375:45401 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/oldWALs 2024-12-01T23:55:12,091 INFO [RS:0;579b0f681375:45401 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 579b0f681375%2C45401%2C1733097235193.meta:.meta(num 1733097237274) 2024-12-01T23:55:12,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741833_1009 (size=4034) 2024-12-01T23:55:12,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741833_1009 (size=4034) 2024-12-01T23:55:12,100 DEBUG [RS:0;579b0f681375:45401 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/oldWALs 2024-12-01T23:55:12,100 INFO [RS:0;579b0f681375:45401 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 579b0f681375%2C45401%2C1733097235193:(num 1733097236920) 2024-12-01T23:55:12,100 DEBUG [RS:0;579b0f681375:45401 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T23:55:12,100 INFO [RS:0;579b0f681375:45401 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T23:55:12,100 INFO [RS:0;579b0f681375:45401 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T23:55:12,100 INFO [RS:0;579b0f681375:45401 {}] hbase.ChoreService(370): Chore service for: regionserver/579b0f681375:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T23:55:12,100 INFO [RS:0;579b0f681375:45401 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T23:55:12,100 INFO [regionserver/579b0f681375:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
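At this point the meta region's flush has been committed into its four family directories (info, ns, rep_barrier, table) and the region is closed. A short sketch that lists those store files with the Hadoop FileSystem API; the NameNode address and region path are the ones printed in this run and would differ elsewhere:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListMetaStoreFilesSketch {
      public static void main(String[] args) throws Exception {
        // NameNode and region directory copied from the log above; they change per run.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38903"), new Configuration());
        Path metaRegion = new Path("/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18"
            + "/data/hbase/meta/1588230740");
        for (String family : new String[] {"info", "ns", "rep_barrier", "table"}) {
          for (FileStatus hfile : fs.listStatus(new Path(metaRegion, family))) {
            // e.g. info/e6e14a7a0b224941a7fb8d04bedbf5b5, ~7.2 K per the entries above
            System.out.println(hfile.getPath() + "  " + hfile.getLen() + " bytes");
          }
        }
      }
    }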
2024-12-01T23:55:12,101 INFO [RS:0;579b0f681375:45401 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45401 2024-12-01T23:55:12,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T23:55:12,135 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/579b0f681375,45401,1733097235193 2024-12-01T23:55:12,135 INFO [RS:0;579b0f681375:45401 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T23:55:12,135 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [579b0f681375,45401,1733097235193] 2024-12-01T23:55:12,151 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/579b0f681375,45401,1733097235193 already deleted, retry=false 2024-12-01T23:55:12,151 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 579b0f681375,45401,1733097235193 expired; onlineServers=0 2024-12-01T23:55:12,152 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '579b0f681375,40611,1733097234530' ***** 2024-12-01T23:55:12,152 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T23:55:12,152 INFO [M:0;579b0f681375:40611 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T23:55:12,152 INFO [M:0;579b0f681375:40611 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T23:55:12,152 DEBUG [M:0;579b0f681375:40611 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T23:55:12,152 DEBUG [M:0;579b0f681375:40611 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T23:55:12,152 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
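Each region server's exit deletes its ephemeral znode under /hbase/rs, and the master's ServerManager counts the cluster down (onlineServers=1, then 0) before stopping itself. From a client, the shrinking set of live servers can be observed through cluster metrics; a hedged sketch, assuming the HBase 2.x+ Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class LiveServersSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Live region servers as reported by the master, e.g. 579b0f681375,45401,... above.
          for (ServerName rs : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
            System.out.println(rs);
          }
        }
      }
    }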
2024-12-01T23:55:12,152 DEBUG [master/579b0f681375:0:becomeActiveMaster-HFileCleaner.small.0-1733097236517 {}] cleaner.HFileCleaner(306): Exit Thread[master/579b0f681375:0:becomeActiveMaster-HFileCleaner.small.0-1733097236517,5,FailOnTimeoutGroup] 2024-12-01T23:55:12,152 DEBUG [master/579b0f681375:0:becomeActiveMaster-HFileCleaner.large.0-1733097236516 {}] cleaner.HFileCleaner(306): Exit Thread[master/579b0f681375:0:becomeActiveMaster-HFileCleaner.large.0-1733097236516,5,FailOnTimeoutGroup] 2024-12-01T23:55:12,152 INFO [M:0;579b0f681375:40611 {}] hbase.ChoreService(370): Chore service for: master/579b0f681375:0 had [ScheduledChore name=QuotaObserverChore, period=60000, unit=MILLISECONDS, ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-01T23:55:12,152 INFO [M:0;579b0f681375:40611 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T23:55:12,152 DEBUG [M:0;579b0f681375:40611 {}] master.HMaster(1795): Stopping service threads 2024-12-01T23:55:12,153 INFO [M:0;579b0f681375:40611 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T23:55:12,153 INFO [M:0;579b0f681375:40611 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T23:55:12,153 ERROR [M:0;579b0f681375:40611 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (952710197) connection to localhost/127.0.0.1:38903 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:38903,5,PEWorkerGroup] Thread[HFileArchiver-1,5,PEWorkerGroup] Thread[HFileArchiver-2,5,PEWorkerGroup] Thread[HFileArchiver-3,5,PEWorkerGroup] Thread[HFileArchiver-4,5,PEWorkerGroup] Thread[HFileArchiver-5,5,PEWorkerGroup] 2024-12-01T23:55:12,153 INFO [M:0;579b0f681375:40611 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T23:55:12,154 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-01T23:55:12,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T23:55:12,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T23:55:12,160 DEBUG [M:0;579b0f681375:40611 {}] zookeeper.ZKUtil(347): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T23:55:12,160 WARN [M:0;579b0f681375:40611 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T23:55:12,161 INFO [M:0;579b0f681375:40611 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/.lastflushedseqids 2024-12-01T23:55:12,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741854_1030 (size=134) 2024-12-01T23:55:12,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741854_1030 (size=134) 2024-12-01T23:55:12,176 INFO [M:0;579b0f681375:40611 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-01T23:55:12,176 INFO [M:0;579b0f681375:40611 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T23:55:12,176 DEBUG [M:0;579b0f681375:40611 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T23:55:12,176 INFO [M:0;579b0f681375:40611 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:55:12,176 DEBUG [M:0;579b0f681375:40611 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T23:55:12,176 DEBUG [M:0;579b0f681375:40611 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T23:55:12,176 DEBUG [M:0;579b0f681375:40611 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T23:55:12,177 INFO [M:0;579b0f681375:40611 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=158.07 KB heapSize=192.26 KB 2024-12-01T23:55:12,199 DEBUG [M:0;579b0f681375:40611 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a45532b364e94fa18b569f2abe2e3ff2 is 82, key is hbase:meta,,1/info:regioninfo/1733097237351/Put/seqid=0 2024-12-01T23:55:12,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741855_1031 (size=5672) 2024-12-01T23:55:12,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741855_1031 (size=5672) 2024-12-01T23:55:12,210 INFO [M:0;579b0f681375:40611 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a45532b364e94fa18b569f2abe2e3ff2 2024-12-01T23:55:12,243 INFO [RS:0;579b0f681375:45401 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T23:55:12,243 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T23:55:12,244 INFO [RS:0;579b0f681375:45401 {}] regionserver.HRegionServer(1031): Exiting; stopping=579b0f681375,45401,1733097235193; zookeeper connection closed. 
2024-12-01T23:55:12,244 DEBUG [pool-50-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45401-0x101949ab0000001, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-01T23:55:12,245 DEBUG [M:0;579b0f681375:40611 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7a62c800774f4c858c1fa6aaafdcb43b is 748, key is \x00\x00\x00\x00\x00\x00\x00\x11/proc:d/1733097242347/Put/seqid=0
2024-12-01T23:55:12,246 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2ca6b103 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2ca6b103
2024-12-01T23:55:12,246 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete
2024-12-01T23:55:12,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741856_1032 (size=12663)
2024-12-01T23:55:12,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741856_1032 (size=12663)
2024-12-01T23:55:12,260 INFO [M:0;579b0f681375:40611 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=157.45 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7a62c800774f4c858c1fa6aaafdcb43b
2024-12-01T23:55:12,270 INFO [M:0;579b0f681375:40611 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7a62c800774f4c858c1fa6aaafdcb43b
2024-12-01T23:55:12,292 DEBUG [M:0;579b0f681375:40611 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a03a0ba7960e431b94166deb7822b73a is 69, key is 579b0f681375,44651,1733097235352/rs:state/1733097236646/Put/seqid=0
2024-12-01T23:55:12,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741857_1033 (size=5224)
2024-12-01T23:55:12,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741857_1033 (size=5224)
2024-12-01T23:55:12,711 INFO [M:0;579b0f681375:40611 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a03a0ba7960e431b94166deb7822b73a
2024-12-01T23:55:12,718 DEBUG [M:0;579b0f681375:40611 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a45532b364e94fa18b569f2abe2e3ff2 as hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a45532b364e94fa18b569f2abe2e3ff2
2024-12-01T23:55:12,725 INFO [M:0;579b0f681375:40611 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a45532b364e94fa18b569f2abe2e3ff2, entries=8, sequenceid=390, filesize=5.5 K
2024-12-01T23:55:12,727 DEBUG [M:0;579b0f681375:40611 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7a62c800774f4c858c1fa6aaafdcb43b as hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7a62c800774f4c858c1fa6aaafdcb43b
2024-12-01T23:55:12,738 INFO [M:0;579b0f681375:40611 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7a62c800774f4c858c1fa6aaafdcb43b
2024-12-01T23:55:12,739 INFO [M:0;579b0f681375:40611 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7a62c800774f4c858c1fa6aaafdcb43b, entries=44, sequenceid=390, filesize=12.4 K
2024-12-01T23:55:12,740 DEBUG [M:0;579b0f681375:40611 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a03a0ba7960e431b94166deb7822b73a as hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a03a0ba7960e431b94166deb7822b73a
2024-12-01T23:55:12,746 INFO [M:0;579b0f681375:40611 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38903/user/jenkins/test-data/a24c7257-e3e5-9a1f-432a-e24e77af6f18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a03a0ba7960e431b94166deb7822b73a, entries=2, sequenceid=390, filesize=5.1 K
2024-12-01T23:55:12,748 INFO [M:0;579b0f681375:40611 {}] regionserver.HRegion(3140): Finished flush of dataSize ~158.07 KB/161861, heapSize ~191.96 KB/196568, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 572ms, sequenceid=390, compaction requested=false
2024-12-01T23:55:12,749 INFO [M:0;579b0f681375:40611 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-01T23:55:12,749 DEBUG [M:0;579b0f681375:40611 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733097312176Disabling compacts and flushes for region at 1733097312176Disabling writes for close at 1733097312176Obtaining lock to block concurrent updates at 1733097312177 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733097312177Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=161861, getHeapSize=196808, getOffHeapSize=0, getCellsCount=449 at 1733097312177Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733097312178 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733097312178Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733097312198 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733097312198Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733097312218 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733097312244 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733097312244Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733097312270 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733097312292 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733097312292Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b866dd: reopening flushed file at 1733097312717 (+425 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b9f19f3: reopening flushed file at 1733097312726 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e900730: reopening flushed file at 1733097312739 (+13 ms)Finished flush of dataSize ~158.07 KB/161861, heapSize ~191.96 KB/196568, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 572ms, sequenceid=390, compaction requested=false at 1733097312748 (+9 ms)Writing region close event to WAL at 1733097312749 (+1 ms)Closed at 1733097312749
2024-12-01T23:55:12,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46129 is added to blk_1073741830_1006 (size=187866)
2024-12-01T23:55:12,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39935 is added to blk_1073741830_1006 (size=187866)
2024-12-01T23:55:12,756 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-01T23:55:12,756 INFO [M:0;579b0f681375:40611 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-01T23:55:12,756 INFO [M:0;579b0f681375:40611 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40611
2024-12-01T23:55:12,756 INFO [M:0;579b0f681375:40611 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-01T23:55:12,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-01T23:55:12,875 INFO [M:0;579b0f681375:40611 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-01T23:55:12,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40611-0x101949ab0000000, quorum=127.0.0.1:61873, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-01T23:55:12,880 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58be3a95{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T23:55:12,882 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@296c17de{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T23:55:12,882 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T23:55:12,882 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30b97b9d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T23:55:12,882 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77d683b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/hadoop.log.dir/,STOPPED}
2024-12-01T23:55:12,885 WARN [BP-1358816574-172.17.0.2-1733097230195 heartbeating to localhost/127.0.0.1:38903 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T23:55:12,885 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T23:55:12,885 WARN [BP-1358816574-172.17.0.2-1733097230195 heartbeating to localhost/127.0.0.1:38903 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358816574-172.17.0.2-1733097230195 (Datanode Uuid c97d0686-e6f4-4994-9ab8-3ca0a242015c) service to localhost/127.0.0.1:38903
2024-12-01T23:55:12,885 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T23:55:12,886 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/cluster_a168f772-ce25-779e-39a7-1ee39f6fa68c/data/data3/current/BP-1358816574-172.17.0.2-1733097230195 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T23:55:12,887 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/cluster_a168f772-ce25-779e-39a7-1ee39f6fa68c/data/data4/current/BP-1358816574-172.17.0.2-1733097230195 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T23:55:12,887 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T23:55:12,889 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@71bdb202{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T23:55:12,889 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69099528{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T23:55:12,889 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T23:55:12,890 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dafd2a7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T23:55:12,890 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aba4a96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/hadoop.log.dir/,STOPPED}
2024-12-01T23:55:12,891 WARN [BP-1358816574-172.17.0.2-1733097230195 heartbeating to localhost/127.0.0.1:38903 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T23:55:12,891 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T23:55:12,891 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T23:55:12,891 WARN [BP-1358816574-172.17.0.2-1733097230195 heartbeating to localhost/127.0.0.1:38903 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358816574-172.17.0.2-1733097230195 (Datanode Uuid f55fbbc8-ba20-4f8f-aded-90ea86fc4d5f) service to localhost/127.0.0.1:38903
2024-12-01T23:55:12,892 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/cluster_a168f772-ce25-779e-39a7-1ee39f6fa68c/data/data1/current/BP-1358816574-172.17.0.2-1733097230195 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T23:55:12,892 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/cluster_a168f772-ce25-779e-39a7-1ee39f6fa68c/data/data2/current/BP-1358816574-172.17.0.2-1733097230195 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T23:55:12,893 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T23:55:12,911 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@358110da{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-01T23:55:12,911 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@105bda08{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T23:55:12,912 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T23:55:12,912 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ce21d39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T23:55:12,912 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e977c95{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/97d8b6f8-e647-7311-59ea-c760fa010fa3/hadoop.log.dir/,STOPPED}
2024-12-01T23:55:12,920 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-01T23:55:12,950 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down